diff --git a/[refs] b/[refs]
index 594a8b3a5514..75853b5fe0f0 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
---
-refs/heads/master: fba2369e6ceb7ae688e91063821bae5140e26393
+refs/heads/master: 534ada5ede3c9236f0bf3c5d77e0d060e66caa4f
diff --git a/trunk/Documentation/DocBook/device-drivers.tmpl b/trunk/Documentation/DocBook/device-drivers.tmpl
index c36892c072da..7514dbf0a679 100644
--- a/trunk/Documentation/DocBook/device-drivers.tmpl
+++ b/trunk/Documentation/DocBook/device-drivers.tmpl
@@ -227,7 +227,7 @@ X!Isound/sound_firmware.c
16x50 UART Driver
!Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250/8250_core.c
+!Edrivers/tty/serial/8250/8250.c
diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt
index 8ccbf27aead4..4609e81dbc37 100644
--- a/trunk/Documentation/kernel-parameters.txt
+++ b/trunk/Documentation/kernel-parameters.txt
@@ -596,6 +596,9 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
is selected automatically. Check
Documentation/kdump/kdump.txt for further details.
+ crashkernel_low=size[KMG]
+ [KNL, x86] Amount of low memory (below 4G) to
+ reserve for the crash kernel.
+
crashkernel=range1:size1[,range2:size2,...][@offset]
[KNL] Same as above, but depends on the memory
in the running system. The syntax of range is
@@ -603,26 +606,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
a memory unit (amount[KMG]). See also
Documentation/kdump/kdump.txt for an example.
- crashkernel=size[KMG],high
- [KNL, x86_64] range could be above 4G. Allow kernel
- to allocate physical memory region from top, so could
- be above 4G if system have more than 4G ram installed.
- Otherwise memory region will be allocated below 4G, if
- available.
- It will be ignored if crashkernel=X is specified.
- crashkernel=size[KMG],low
- [KNL, x86_64] range under 4G. When crashkernel=X,high
- is passed, kernel could allocate physical memory region
- above 4G, that cause second kernel crash on system
- that require some amount of low memory, e.g. swiotlb
- requires at least 64M+32K low memory. Kernel would
- try to allocate 72M below 4G automatically.
- This one let user to specify own low range under 4G
- for second kernel instead.
- 0: to disable low allocation.
- It will be ignored when crashkernel=X,high is not used
- or memory reserved is below 4G.
-
cs89x0_dma= [HW,NET]
Format:
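As a hypothetical illustration of the syntax documented above (the sizes are examples only, not recommendations):

    crashkernel=512M-2G:64M,2G-:128M
    crashkernel_low=72M

The first line reserves 64M of crash kernel memory when the running system has between 512M and 2G of RAM and 128M when it has more; the second requests a 72M reservation below 4G, 72M being the figure the removed text above quotes for the swiotlb requirement.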
@@ -805,12 +788,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
edd= [EDD]
Format: {"off" | "on" | "skip[mbr]"}
- efi_no_storage_paranoia [EFI; X86]
- Using this parameter you can use more than 50% of
- your efi variable storage. Use this parameter only if
- you are really sure that your UEFI does sane gc and
- fulfills the spec otherwise your board may brick.
-
eisa_irq_edge= [PARISC,HW]
See header of drivers/parisc/eisa.c.
diff --git a/trunk/Documentation/powerpc/00-INDEX b/trunk/Documentation/powerpc/00-INDEX
index dd9e92802ec0..5620fb5ac425 100644
--- a/trunk/Documentation/powerpc/00-INDEX
+++ b/trunk/Documentation/powerpc/00-INDEX
@@ -14,6 +14,10 @@ hvcs.txt
- IBM "Hypervisor Virtual Console Server" Installation Guide
mpc52xx.txt
- Linux 2.6.x on MPC52xx family
+sound.txt
+ - info on sound support under Linux/PPC
+zImage_layout.txt
+ - info on the kernel images for Linux/PPC
qe_firmware.txt
- describes the layout of firmware binaries for the Freescale QUICC
Engine and the code that parses and uploads the microcode therein.
diff --git a/trunk/Documentation/powerpc/ptrace.txt b/trunk/Documentation/powerpc/ptrace.txt
index 99c5ce88d0fe..f2a7a3919772 100644
--- a/trunk/Documentation/powerpc/ptrace.txt
+++ b/trunk/Documentation/powerpc/ptrace.txt
@@ -40,7 +40,6 @@ features will have bits indicating whether there is support for:
#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x2
#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x4
#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x8
-#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x10
2. PTRACE_SETHWDEBUG
diff --git a/trunk/Documentation/powerpc/sound.txt b/trunk/Documentation/powerpc/sound.txt
new file mode 100644
index 000000000000..df23d95e03a0
--- /dev/null
+++ b/trunk/Documentation/powerpc/sound.txt
@@ -0,0 +1,81 @@
+ Information about PowerPC Sound support
+=====================================================================
+
+Please mail me (Cort Dougan, cort@fsmlabs.com) if you have questions,
+comments or corrections.
+
+Last Change: 6.16.99
+
+This covers sound on the PReP and CHRP systems for now; information on
+the PowerMacs will be added later.
+
+Sound on PReP has been tested and is working with the PowerStack and IBM
+Power Series onboard sound systems which are based on the cs4231(2) chip.
+The sound options when doing the make config are a bit different from
+the default, though.
+
+The I/O base, IRQ and DMA values that you enter during the make config
+are ignored and are set at boot time according to the machine type.
+This is so that one binary can be used for both Motorola and IBM
+machines, which use different values; the driver does not normally
+allow this, so things are hacked together in such a way that this
+information can be set automatically on boot.
+
+1. Motorola PowerStack PReP machines
+
+ Enable support for "Crystal CS4232 based (PnP) cards" and for the
+ Microsoft Sound System. The MSS isn't used, but some of the routines
+ that the CS4232 driver uses are in it.
+
+ Although the options you set are ignored and determined automatically
+ on boot, these are included for information only:
+
+ (830) CS4232 audio I/O base 530, 604, E80 or F40
+ (10) CS4232 audio IRQ 5, 7, 9, 11, 12 or 15
+ (6) CS4232 audio DMA 0, 1 or 3
+ (7) CS4232 second (duplex) DMA 0, 1 or 3
+
+ This will allow simultaneous record and playback, as 2 different dma
+ channels are used.
+
+ The sound will be all left channel and very low volume, since the
+ auxiliary input isn't muted by default. I had the changes necessary
+ for this in the kernel, but the sound driver maintainer didn't want
+ to include them since the problem isn't common on other machines.
+ To fix this you need to mute the auxiliary input with a mixer utility
+ of some sort (if you find one, please let me know) or patch the driver
+ yourself and recompile.
+
+ There is a problem on the PowerStack 2s (PowerStack Pros), which use a
+ different irq/drq than the kernel expects. Unfortunately, I don't know
+ which irq/drq it is, so if anyone knows, please email me.
+
+ Midi is not supported since the cs4232 driver doesn't support midi yet.
+
+2. IBM PowerPersonal PReP machines
+
+ I've only tested sound on the Power Personal Series of IBM workstations,
+ so if you try it on others, please let me know the result. I'm especially
+ interested in the 43p's sound system, which I know nothing about.
+
+ Enable support for "Crystal CS4232 based (PnP) cards" and for the
+ Microsoft Sound System. The MSS isn't used, but some of the routines
+ that the CS4232 driver uses are in it.
+
+ Although the options you set are ignored and determined automatically
+ on boot, these are included for information only:
+
+ (530) CS4232 audio I/O base 530, 604, E80 or F40
+ (5) CS4232 audio IRQ 5, 7, 9, 11, 12 or 15
+ (1) CS4232 audio DMA 0, 1 or 3
+ (7) CS4232 second (duplex) DMA 0, 1 or 3
+ (330) CS4232 MIDI I/O base 330, 370, 3B0 or 3F0
+ (9) CS4232 MIDI IRQ 5, 7, 9, 11, 12 or 15
+
+ This setup does _NOT_ allow for recording yet.
+
+ Midi is not supported since the cs4232 driver doesn't support midi yet.
+
+3. IBM CHRP
+
+ I have only tested this on the 43P-150. Build the kernel with the cs4232
+ set as a module and load the module with irq=9 dma=1 dma2=2 io=0x550
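For the CHRP case, a minimal sketch of the module load (assuming the OSS driver is built as the cs4232 module, as the build instruction above suggests) would be:

    modprobe cs4232 io=0x550 irq=9 dma=1 dma2=2

The io/irq/dma values are the ones quoted for the 43P-150; other machines may need different settings.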
diff --git a/trunk/Documentation/powerpc/zImage_layout.txt b/trunk/Documentation/powerpc/zImage_layout.txt
new file mode 100644
index 000000000000..048e0150f571
--- /dev/null
+++ b/trunk/Documentation/powerpc/zImage_layout.txt
@@ -0,0 +1,47 @@
+ Information about the Linux/PPC kernel images
+=====================================================================
+
+Please mail me (Cort Dougan, cort@fsmlabs.com) if you have questions,
+comments or corrections.
+
+This document is meant to answer several questions I've had about how
+the PReP system boots and how Linux/PPC interacts with that mechanism.
+It would be nice if we could have information on how other architectures
+boot here as well. If you have anything to contribute, please
+let me know.
+
+
+1. PReP boot file
+
+ This is the file necessary to boot PReP systems from floppy or
+ hard drive. The firmware reads the PReP partition table entry
+ and will load the image accordingly.
+
+ To boot the zImage, copy it onto a floppy with dd if=zImage of=/dev/fd0h1440
+ or onto a PReP hard drive partition with dd if=zImage of=/dev/sda4,
+ assuming you've created a PReP partition (type 0x41) with fdisk on
+ /dev/sda4.
+
+ The layout of the image format is:
+
+ 0x0 +------------+
+ | | PReP partition table entry
+ | |
+ 0x400 +------------+
+ | | Bootstrap program code + data
+ | |
+ | |
+ +------------+
+ | | compressed kernel, elf header removed
+ +------------+
+ | | initrd (if loaded)
+ +------------+
+ | | Elf section table for bootstrap program
+ +------------+
+
+
+2. MBX boot file
+
+ The MBX boards can load an ELF image and relocate it to the
+ proper location in memory: the image is copied to the address at
+ which it was linked.
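As a worked example of the PReP boot file section above (device names follow the ones used in the text), the whole procedure is roughly:

    fdisk /dev/sda                    # create a PReP partition (type 0x41), e.g. /dev/sda4
    dd if=zImage of=/dev/sda4         # boot from hard drive
    dd if=zImage of=/dev/fd0h1440     # or boot from floppy

The firmware then reads the PReP partition table entry and loads the bootstrap program and compressed kernel laid out as in the diagram above.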
diff --git a/trunk/Documentation/scsi/LICENSE.qla2xxx b/trunk/Documentation/scsi/LICENSE.qla2xxx
index 5020b7b5a244..27a91cf43d6d 100644
--- a/trunk/Documentation/scsi/LICENSE.qla2xxx
+++ b/trunk/Documentation/scsi/LICENSE.qla2xxx
@@ -1,4 +1,4 @@
-Copyright (c) 2003-2013 QLogic Corporation
+Copyright (c) 2003-2012 QLogic Corporation
QLogic Linux FC-FCoE Driver
This program includes a device driver for Linux 3.x.
diff --git a/trunk/Documentation/sound/alsa/ALSA-Configuration.txt b/trunk/Documentation/sound/alsa/ALSA-Configuration.txt
index 95731a08f257..4499bd948860 100644
--- a/trunk/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/trunk/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -890,8 +890,9 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
enable_msi - Enable Message Signaled Interrupt (MSI) (default = off)
power_save - Automatic power-saving timeout (in second, 0 =
disable)
- power_save_controller - Reset HD-audio controller in power-saving mode
- (default = on)
+ power_save_controller - Support runtime D3 of HD-audio controller
+ (-1 = on for supported chip (default), false = off,
+ true = force to on even for unsupported hardware)
align_buffer_size - Force rounding of buffer/period sizes to multiples
of 128 bytes. This is more efficient in terms of memory
access but isn't required by the HDA spec and prevents
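A sketch of how the new option might be set from modprobe configuration, assuming (as the surrounding section suggests) that these are snd-hda-intel parameters; the values are illustrative:

    options snd-hda-intel power_save=10 power_save_controller=N

This enables a 10-second power-saving timeout while keeping the controller out of runtime D3; leaving power_save_controller unset keeps the default of -1 (on for supported chips).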
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS
index 8bdd7a7ef2f4..74e58a4d035b 100644
--- a/trunk/MAINTAINERS
+++ b/trunk/MAINTAINERS
@@ -4941,12 +4941,6 @@ W: logfs.org
S: Maintained
F: fs/logfs/
-LPC32XX MACHINE SUPPORT
-M: Roland Stigge
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-F: arch/arm/mach-lpc32xx/
-
LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
M: Nagalakshmi Nandigama
M: Sreekanth Reddy
@@ -5071,8 +5065,9 @@ S: Maintained
F: drivers/net/ethernet/marvell/sk*
MARVELL LIBERTAS WIRELESS DRIVER
+M: Dan Williams
L: libertas-dev@lists.infradead.org
-S: Orphan
+S: Maintained
F: drivers/net/wireless/libertas/
MARVELL MV643XX ETHERNET DRIVER
@@ -5574,7 +5569,6 @@ F: include/uapi/linux/if_*
F: include/uapi/linux/netdevice.h
NETXEN (1/10) GbE SUPPORT
-M: Manish Chopra
M: Sony Chacko
M: Rajesh Borundia
L: netdev@vger.kernel.org
@@ -6631,7 +6625,7 @@ S: Supported
F: fs/reiserfs/
REGISTER MAP ABSTRACTION
-M: Mark Brown
+M: Mark Brown
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/regmap.git
S: Supported
F: drivers/base/regmap/
@@ -6957,6 +6951,7 @@ F: drivers/scsi/st*
SCTP PROTOCOL
M: Vlad Yasevich
+M: Sridhar Samudrala
M: Neil Horman
L: linux-sctp@vger.kernel.org
W: http://lksctp.sourceforge.net
@@ -7379,7 +7374,7 @@ F: sound/
SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC)
M: Liam Girdwood
-M: Mark Brown
+M: Mark Brown
T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git
L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://alsa-project.org/main/index.php/ASoC
@@ -7468,7 +7463,7 @@ F: drivers/clk/spear/
SPI SUBSYSTEM
M: Grant Likely
-M: Mark Brown
+M: Mark Brown
L: spi-devel-general@lists.sourceforge.net
Q: http://patchwork.kernel.org/project/spi-devel-general/list/
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -8713,7 +8708,7 @@ F: drivers/scsi/vmw_pvscsi.h
VOLTAGE AND CURRENT REGULATOR FRAMEWORK
M: Liam Girdwood
-M: Mark Brown
+M: Mark Brown
W: http://opensource.wolfsonmicro.com/node/15
W: http://www.slimlogic.co.uk/?p=48
T: git git://git.kernel.org/pub/scm/linux/kernel/git/lrg/regulator.git
diff --git a/trunk/Makefile b/trunk/Makefile
index 46263d808876..58a165b02af1 100644
--- a/trunk/Makefile
+++ b/trunk/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 9
SUBLEVEL = 0
-EXTRAVERSION = -rc8
+EXTRAVERSION = -rc5
NAME = Unicycling Gorilla
# *DOCUMENTATION*
@@ -513,8 +513,7 @@ ifeq ($(KBUILD_EXTMOD),)
# Carefully list dependencies so we do not try to build scripts twice
# in parallel
PHONY += scripts
-scripts: scripts_basic include/config/auto.conf include/config/tristate.conf \
- asm-generic
+scripts: scripts_basic include/config/auto.conf include/config/tristate.conf
$(Q)$(MAKE) $(build)=$(@)
# Objects we will link into vmlinux / subdirs we need to visit
diff --git a/trunk/arch/alpha/Makefile b/trunk/arch/alpha/Makefile
index 2cc3cc519c54..4759fe751aa1 100644
--- a/trunk/arch/alpha/Makefile
+++ b/trunk/arch/alpha/Makefile
@@ -12,7 +12,7 @@ NM := $(NM) -B
LDFLAGS_vmlinux := -static -N #-relax
CHECKFLAGS += -D__alpha__ -m64
-cflags-y := -pipe -mno-fp-regs -ffixed-8
+cflags-y := -pipe -mno-fp-regs -ffixed-8 -msmall-data
cflags-y += $(call cc-option, -fno-jump-tables)
cpuflags-$(CONFIG_ALPHA_EV4) := -mcpu=ev4
diff --git a/trunk/arch/alpha/include/asm/floppy.h b/trunk/arch/alpha/include/asm/floppy.h
index bae97eb19d26..46cefbd50e73 100644
--- a/trunk/arch/alpha/include/asm/floppy.h
+++ b/trunk/arch/alpha/include/asm/floppy.h
@@ -26,7 +26,7 @@
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size) /* nothing */
#define fd_request_irq() request_irq(FLOPPY_IRQ, floppy_interrupt,\
- 0, "floppy", NULL)
+ IRQF_DISABLED, "floppy", NULL)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#ifdef CONFIG_PCI
diff --git a/trunk/arch/alpha/kernel/irq.c b/trunk/arch/alpha/kernel/irq.c
index 7b2be251c30f..2872accd2215 100644
--- a/trunk/arch/alpha/kernel/irq.c
+++ b/trunk/arch/alpha/kernel/irq.c
@@ -117,6 +117,13 @@ handle_irq(int irq)
return;
}
+ /*
+ * From here we must proceed with IPL_MAX. Note that we do not
+ * explicitly enable interrupts afterwards - some MILO PALcode
+ * (namely LX164 one) seems to have severe problems with RTI
+ * at IPL 0.
+ */
+ local_irq_disable();
irq_enter();
generic_handle_irq_desc(irq, desc);
irq_exit();
diff --git a/trunk/arch/alpha/kernel/irq_alpha.c b/trunk/arch/alpha/kernel/irq_alpha.c
index f433fc11877a..772ddfdb71a8 100644
--- a/trunk/arch/alpha/kernel/irq_alpha.c
+++ b/trunk/arch/alpha/kernel/irq_alpha.c
@@ -45,14 +45,6 @@ do_entInt(unsigned long type, unsigned long vector,
unsigned long la_ptr, struct pt_regs *regs)
{
struct pt_regs *old_regs;
-
- /*
- * Disable interrupts during IRQ handling.
- * Note that there is no matching local_irq_enable() due to
- * severe problems with RTI at IPL0 and some MILO PALcode
- * (namely LX164).
- */
- local_irq_disable();
switch (type) {
case 0:
#ifdef CONFIG_SMP
@@ -70,6 +62,7 @@ do_entInt(unsigned long type, unsigned long vector,
{
long cpu;
+ local_irq_disable();
smp_percpu_timer_interrupt(regs);
cpu = smp_processor_id();
if (cpu != boot_cpuid) {
@@ -229,6 +222,7 @@ process_mcheck_info(unsigned long vector, unsigned long la_ptr,
struct irqaction timer_irqaction = {
.handler = timer_interrupt,
+ .flags = IRQF_DISABLED,
.name = "timer",
};
diff --git a/trunk/arch/alpha/kernel/sys_nautilus.c b/trunk/arch/alpha/kernel/sys_nautilus.c
index 1383f8601a93..4d4c046f708d 100644
--- a/trunk/arch/alpha/kernel/sys_nautilus.c
+++ b/trunk/arch/alpha/kernel/sys_nautilus.c
@@ -188,10 +188,6 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr)
extern void free_reserved_mem(void *, void *);
extern void pcibios_claim_one_bus(struct pci_bus *);
-static struct resource irongate_io = {
- .name = "Irongate PCI IO",
- .flags = IORESOURCE_IO,
-};
static struct resource irongate_mem = {
.name = "Irongate PCI MEM",
.flags = IORESOURCE_MEM,
@@ -213,7 +209,6 @@ nautilus_init_pci(void)
irongate = pci_get_bus_and_slot(0, 0);
bus->self = irongate;
- bus->resource[0] = &irongate_io;
bus->resource[1] = &irongate_mem;
pci_bus_size_bridges(bus);
diff --git a/trunk/arch/alpha/kernel/sys_titan.c b/trunk/arch/alpha/kernel/sys_titan.c
index a53cf03f49d5..5cf4a481b8c5 100644
--- a/trunk/arch/alpha/kernel/sys_titan.c
+++ b/trunk/arch/alpha/kernel/sys_titan.c
@@ -280,15 +280,15 @@ titan_late_init(void)
* all reported to the kernel as machine checks, so the handler
* is a nop so it can be called to count the individual events.
*/
- titan_request_irq(63+16, titan_intr_nop, 0,
+ titan_request_irq(63+16, titan_intr_nop, IRQF_DISABLED,
"CChip Error", NULL);
- titan_request_irq(62+16, titan_intr_nop, 0,
+ titan_request_irq(62+16, titan_intr_nop, IRQF_DISABLED,
"PChip 0 H_Error", NULL);
- titan_request_irq(61+16, titan_intr_nop, 0,
+ titan_request_irq(61+16, titan_intr_nop, IRQF_DISABLED,
"PChip 1 H_Error", NULL);
- titan_request_irq(60+16, titan_intr_nop, 0,
+ titan_request_irq(60+16, titan_intr_nop, IRQF_DISABLED,
"PChip 0 C_Error", NULL);
- titan_request_irq(59+16, titan_intr_nop, 0,
+ titan_request_irq(59+16, titan_intr_nop, IRQF_DISABLED,
"PChip 1 C_Error", NULL);
/*
@@ -348,9 +348,9 @@ privateer_init_pci(void)
* Hook a couple of extra err interrupts that the
* common titan code won't.
*/
- titan_request_irq(53+16, titan_intr_nop, 0,
+ titan_request_irq(53+16, titan_intr_nop, IRQF_DISABLED,
"NMI", NULL);
- titan_request_irq(50+16, titan_intr_nop, 0,
+ titan_request_irq(50+16, titan_intr_nop, IRQF_DISABLED,
"Temperature Warning", NULL);
/*
diff --git a/trunk/arch/arc/include/asm/irqflags.h b/trunk/arch/arc/include/asm/irqflags.h
index eac071668201..ccd84806b62f 100644
--- a/trunk/arch/arc/include/asm/irqflags.h
+++ b/trunk/arch/arc/include/asm/irqflags.h
@@ -39,7 +39,7 @@ static inline long arch_local_irq_save(void)
" flag.nz %0 \n"
: "=r"(temp), "=r"(flags)
: "n"((STATUS_E1_MASK | STATUS_E2_MASK))
- : "memory", "cc");
+ : "cc");
return flags;
}
@@ -53,8 +53,7 @@ static inline void arch_local_irq_restore(unsigned long flags)
__asm__ __volatile__(
" flag %0 \n"
:
- : "r"(flags)
- : "memory");
+ : "r"(flags));
}
/*
@@ -74,8 +73,7 @@ static inline void arch_local_irq_disable(void)
" and %0, %0, %1 \n"
" flag %0 \n"
: "=&r"(temp)
- : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK))
- : "memory");
+ : "n"(~(STATUS_E1_MASK | STATUS_E2_MASK)));
}
/*
@@ -87,9 +85,7 @@ static inline long arch_local_save_flags(void)
__asm__ __volatile__(
" lr %0, [status32] \n"
- : "=&r"(temp)
- :
- : "memory");
+ : "=&r"(temp));
return temp;
}
diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig
index 1cacda426a0e..13b739469c51 100644
--- a/trunk/arch/arm/Kconfig
+++ b/trunk/arch/arm/Kconfig
@@ -1183,9 +1183,9 @@ config ARM_NR_BANKS
default 8
config IWMMXT
- bool "Enable iWMMXt support" if !CPU_PJ4
+ bool "Enable iWMMXt support"
depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4
- default y if PXA27x || PXA3xx || ARCH_MMP || CPU_PJ4
+ default y if PXA27x || PXA3xx || ARCH_MMP
help
Enable support for iWMMXt context switching at run time if
running on a CPU that supports it.
@@ -1439,16 +1439,6 @@ config ARM_ERRATA_775420
to deadlock. This workaround puts DSB before executing ISB if
an abort may occur on cache maintenance.
-config ARM_ERRATA_798181
- bool "ARM errata: TLBI/DSB failure on Cortex-A15"
- depends on CPU_V7 && SMP
- help
- On Cortex-A15 (r0p0..r3p2) the TLBI*IS/DSB operations are not
- adequately shooting down all use of the old entries. This
- option enables the Linux kernel workaround for this erratum
- which sends an IPI to the CPUs that are running the same ASID
- as the one being invalidated.
-
endmenu
source "arch/arm/common/Kconfig"
diff --git a/trunk/arch/arm/boot/dts/armada-370-mirabox.dts b/trunk/arch/arm/boot/dts/armada-370-mirabox.dts
index 3234875824dc..dd0c57dd9f30 100644
--- a/trunk/arch/arm/boot/dts/armada-370-mirabox.dts
+++ b/trunk/arch/arm/boot/dts/armada-370-mirabox.dts
@@ -54,7 +54,7 @@
};
mvsdio@d00d4000 {
- pinctrl-0 = <&sdio_pins3>;
+ pinctrl-0 = <&sdio_pins2>;
pinctrl-names = "default";
status = "okay";
/*
diff --git a/trunk/arch/arm/boot/dts/armada-370.dtsi b/trunk/arch/arm/boot/dts/armada-370.dtsi
index a195debb67d3..8188d138020e 100644
--- a/trunk/arch/arm/boot/dts/armada-370.dtsi
+++ b/trunk/arch/arm/boot/dts/armada-370.dtsi
@@ -59,12 +59,6 @@
"mpp50", "mpp51", "mpp52";
marvell,function = "sd0";
};
-
- sdio_pins3: sdio-pins3 {
- marvell,pins = "mpp48", "mpp49", "mpp50",
- "mpp51", "mpp52", "mpp53";
- marvell,function = "sd0";
- };
};
gpio0: gpio@d0018100 {
diff --git a/trunk/arch/arm/boot/dts/dbx5x0.dtsi b/trunk/arch/arm/boot/dts/dbx5x0.dtsi
index aaa63d0a8096..9de93096601a 100644
--- a/trunk/arch/arm/boot/dts/dbx5x0.dtsi
+++ b/trunk/arch/arm/boot/dts/dbx5x0.dtsi
@@ -191,8 +191,8 @@
prcmu: prcmu@80157000 {
compatible = "stericsson,db8500-prcmu";
- reg = <0x80157000 0x1000>, <0x801b0000 0x8000>, <0x801b8000 0x1000>;
- reg-names = "prcmu", "prcmu-tcpm", "prcmu-tcdm";
+ reg = <0x80157000 0x1000>;
+ reg-names = "prcmu";
interrupts = <0 47 0x4>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/trunk/arch/arm/boot/dts/imx28-m28evk.dts b/trunk/arch/arm/boot/dts/imx28-m28evk.dts
index fd36e1cca104..6ce3d17c3a29 100644
--- a/trunk/arch/arm/boot/dts/imx28-m28evk.dts
+++ b/trunk/arch/arm/boot/dts/imx28-m28evk.dts
@@ -152,6 +152,7 @@
i2c0: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
+ clock-frequency = <400000>;
status = "okay";
sgtl5000: codec@0a {
diff --git a/trunk/arch/arm/boot/dts/imx28-sps1.dts b/trunk/arch/arm/boot/dts/imx28-sps1.dts
index 6c6a5442800a..e6cde8aa7fff 100644
--- a/trunk/arch/arm/boot/dts/imx28-sps1.dts
+++ b/trunk/arch/arm/boot/dts/imx28-sps1.dts
@@ -70,6 +70,7 @@
i2c0: i2c@80058000 {
pinctrl-names = "default";
pinctrl-0 = <&i2c0_pins_a>;
+ clock-frequency = <400000>;
status = "okay";
rtc: rtc@51 {
diff --git a/trunk/arch/arm/boot/dts/imx6qdl.dtsi b/trunk/arch/arm/boot/dts/imx6qdl.dtsi
index 281a223591ff..06ec460b4581 100644
--- a/trunk/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/trunk/arch/arm/boot/dts/imx6qdl.dtsi
@@ -91,7 +91,6 @@
compatible = "arm,cortex-a9-twd-timer";
reg = <0x00a00600 0x20>;
interrupts = <1 13 0xf01>;
- clocks = <&clks 15>;
};
L2: l2-cache@00a02000 {
diff --git a/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts b/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts
index c3573be7b92c..bd83b8fc7c83 100644
--- a/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts
+++ b/trunk/arch/arm/boot/dts/kirkwood-goflexnet.dts
@@ -77,7 +77,6 @@
};
nand@3000000 {
- chip-delay = <40>;
status = "okay";
partition@0 {
diff --git a/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts b/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
index 3694e94f6e99..93c3afbef9ee 100644
--- a/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
+++ b/trunk/arch/arm/boot/dts/kirkwood-iomega_ix2_200.dts
@@ -96,11 +96,11 @@
marvell,function = "gpio";
};
pmx_led_rebuild_brt_ctrl_1: pmx-led-rebuild-brt-ctrl-1 {
- marvell,pins = "mpp46";
+ marvell,pins = "mpp44";
marvell,function = "gpio";
};
pmx_led_rebuild_brt_ctrl_2: pmx-led-rebuild-brt-ctrl-2 {
- marvell,pins = "mpp47";
+ marvell,pins = "mpp45";
marvell,function = "gpio";
};
@@ -157,14 +157,14 @@
gpios = <&gpio0 16 0>;
linux,default-trigger = "default-on";
};
- rebuild_led {
- label = "status:white:rebuild_led";
- gpios = <&gpio1 4 0>;
- };
- health_led {
+ health_led1 {
label = "status:red:health_led";
gpios = <&gpio1 5 0>;
};
+ health_led2 {
+ label = "status:white:health_led";
+ gpios = <&gpio1 4 0>;
+ };
backup_led {
label = "status:blue:backup_led";
gpios = <&gpio0 15 0>;
diff --git a/trunk/arch/arm/boot/dts/orion5x.dtsi b/trunk/arch/arm/boot/dts/orion5x.dtsi
index f7bec3b1ba32..8aad00f81ed9 100644
--- a/trunk/arch/arm/boot/dts/orion5x.dtsi
+++ b/trunk/arch/arm/boot/dts/orion5x.dtsi
@@ -13,9 +13,6 @@
compatible = "marvell,orion5x";
interrupt-parent = <&intc>;
- aliases {
- gpio0 = &gpio0;
- };
intc: interrupt-controller {
compatible = "marvell,orion-intc", "marvell,intc";
interrupt-controller;
@@ -35,9 +32,7 @@
#gpio-cells = <2>;
gpio-controller;
reg = <0x10100 0x40>;
- ngpios = <32>;
- interrupt-controller;
- #interrupt-cells = <2>;
+ ngpio = <32>;
interrupts = <6>, <7>, <8>, <9>;
};
@@ -96,7 +91,7 @@
reg = <0x90000 0x10000>,
<0xf2200000 0x800>;
reg-names = "regs", "sram";
- interrupts = <28>;
+ interrupts = <22>;
status = "okay";
};
};
diff --git a/trunk/arch/arm/include/asm/delay.h b/trunk/arch/arm/include/asm/delay.h
index dff714d886d5..720799fd3a81 100644
--- a/trunk/arch/arm/include/asm/delay.h
+++ b/trunk/arch/arm/include/asm/delay.h
@@ -24,7 +24,7 @@ extern struct arm_delay_ops {
void (*delay)(unsigned long);
void (*const_udelay)(unsigned long);
void (*udelay)(unsigned long);
- unsigned long ticks_per_jiffy;
+ bool const_clock;
} arm_delay_ops;
#define __delay(n) arm_delay_ops.delay(n)
diff --git a/trunk/arch/arm/include/asm/glue-cache.h b/trunk/arch/arm/include/asm/glue-cache.h
index ea289e1435e7..cca9f15704ed 100644
--- a/trunk/arch/arm/include/asm/glue-cache.h
+++ b/trunk/arch/arm/include/asm/glue-cache.h
@@ -19,6 +19,14 @@
#undef _CACHE
#undef MULTI_CACHE
+#if defined(CONFIG_CPU_CACHE_V3)
+# ifdef _CACHE
+# define MULTI_CACHE 1
+# else
+# define _CACHE v3
+# endif
+#endif
+
#if defined(CONFIG_CPU_CACHE_V4)
# ifdef _CACHE
# define MULTI_CACHE 1
diff --git a/trunk/arch/arm/include/asm/hardware/iop3xx.h b/trunk/arch/arm/include/asm/hardware/iop3xx.h
index ed94b1a366ae..02fe2fbe2477 100644
--- a/trunk/arch/arm/include/asm/hardware/iop3xx.h
+++ b/trunk/arch/arm/include/asm/hardware/iop3xx.h
@@ -37,7 +37,7 @@ extern int iop3xx_get_init_atu(void);
* IOP3XX processor registers
*/
#define IOP3XX_PERIPHERAL_PHYS_BASE 0xffffe000
-#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfedfe000
+#define IOP3XX_PERIPHERAL_VIRT_BASE 0xfeffe000
#define IOP3XX_PERIPHERAL_SIZE 0x00002000
#define IOP3XX_PERIPHERAL_UPPER_PA (IOP3XX_PERIPHERAL_PHYS_BASE +\
IOP3XX_PERIPHERAL_SIZE - 1)
diff --git a/trunk/arch/arm/include/asm/highmem.h b/trunk/arch/arm/include/asm/highmem.h
index 91b99abe7a95..8c5e828f484d 100644
--- a/trunk/arch/arm/include/asm/highmem.h
+++ b/trunk/arch/arm/include/asm/highmem.h
@@ -41,13 +41,6 @@ extern void kunmap_high(struct page *page);
#endif
#endif
-/*
- * Needed to be able to broadcast the TLB invalidation for kmap.
- */
-#ifdef CONFIG_ARM_ERRATA_798181
-#undef ARCH_NEEDS_KMAP_HIGH_GET
-#endif
-
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
extern void *kmap_high_get(struct page *page);
#else
diff --git a/trunk/arch/arm/include/asm/mmu_context.h b/trunk/arch/arm/include/asm/mmu_context.h
index a7b85e0d0cc1..863a6611323c 100644
--- a/trunk/arch/arm/include/asm/mmu_context.h
+++ b/trunk/arch/arm/include/asm/mmu_context.h
@@ -27,8 +27,6 @@ void __check_vmalloc_seq(struct mm_struct *mm);
void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk);
#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
-DECLARE_PER_CPU(atomic64_t, active_asids);
-
#else /* !CONFIG_CPU_HAS_ASID */
#ifdef CONFIG_MMU
diff --git a/trunk/arch/arm/include/asm/pgtable-3level.h b/trunk/arch/arm/include/asm/pgtable-3level.h
index 86b8fe398b95..6ef8afd1b64c 100644
--- a/trunk/arch/arm/include/asm/pgtable-3level.h
+++ b/trunk/arch/arm/include/asm/pgtable-3level.h
@@ -111,7 +111,7 @@
#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/trunk/arch/arm/include/asm/tlbflush.h b/trunk/arch/arm/include/asm/tlbflush.h
index ab865e65a84c..4db8c8820f0d 100644
--- a/trunk/arch/arm/include/asm/tlbflush.h
+++ b/trunk/arch/arm/include/asm/tlbflush.h
@@ -14,6 +14,7 @@
#include
+#define TLB_V3_PAGE (1 << 0)
#define TLB_V4_U_PAGE (1 << 1)
#define TLB_V4_D_PAGE (1 << 2)
#define TLB_V4_I_PAGE (1 << 3)
@@ -21,6 +22,7 @@
#define TLB_V6_D_PAGE (1 << 5)
#define TLB_V6_I_PAGE (1 << 6)
+#define TLB_V3_FULL (1 << 8)
#define TLB_V4_U_FULL (1 << 9)
#define TLB_V4_D_FULL (1 << 10)
#define TLB_V4_I_FULL (1 << 11)
@@ -50,6 +52,7 @@
* =============
*
* We have the following to choose from:
+ * v3 - ARMv3
* v4 - ARMv4 without write buffer
* v4wb - ARMv4 with write buffer without I TLB flush entry instruction
* v4wbi - ARMv4 with write buffer with I TLB flush entry instruction
@@ -327,6 +330,7 @@ static inline void local_flush_tlb_all(void)
if (tlb_flag(TLB_WB))
dsb();
+ tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
tlb_op(TLB_V4_U_FULL | TLB_V6_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL | TLB_V6_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL | TLB_V6_I_FULL, "c8, c5, 0", zero);
@@ -347,8 +351,9 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
if (tlb_flag(TLB_WB))
dsb();
- if (possible_tlb_flags & (TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
+ if (possible_tlb_flags & (TLB_V3_FULL|TLB_V4_U_FULL|TLB_V4_D_FULL|TLB_V4_I_FULL)) {
if (cpumask_test_cpu(get_cpu(), mm_cpumask(mm))) {
+ tlb_op(TLB_V3_FULL, "c6, c0, 0", zero);
tlb_op(TLB_V4_U_FULL, "c8, c7, 0", zero);
tlb_op(TLB_V4_D_FULL, "c8, c6, 0", zero);
tlb_op(TLB_V4_I_FULL, "c8, c5, 0", zero);
@@ -380,8 +385,9 @@ local_flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
if (tlb_flag(TLB_WB))
dsb();
- if (possible_tlb_flags & (TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
+ if (possible_tlb_flags & (TLB_V3_PAGE|TLB_V4_U_PAGE|TLB_V4_D_PAGE|TLB_V4_I_PAGE|TLB_V4_I_FULL) &&
cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+ tlb_op(TLB_V3_PAGE, "c6, c0, 0", uaddr);
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", uaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", uaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", uaddr);
@@ -412,6 +418,7 @@ static inline void local_flush_tlb_kernel_page(unsigned long kaddr)
if (tlb_flag(TLB_WB))
dsb();
+ tlb_op(TLB_V3_PAGE, "c6, c0, 0", kaddr);
tlb_op(TLB_V4_U_PAGE, "c8, c7, 1", kaddr);
tlb_op(TLB_V4_D_PAGE, "c8, c6, 1", kaddr);
tlb_op(TLB_V4_I_PAGE, "c8, c5, 1", kaddr);
@@ -443,21 +450,6 @@ static inline void local_flush_bp_all(void)
isb();
}
-#ifdef CONFIG_ARM_ERRATA_798181
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
- /*
- * Dummy TLBIMVAIS. Using the unmapped address 0 and ASID 0.
- */
- asm("mcr p15, 0, %0, c8, c3, 1" : : "r" (0));
- dsb();
-}
-#else
-static inline void dummy_flush_tlb_a15_erratum(void)
-{
-}
-#endif
-
/*
* flush_pmd_entry
*
diff --git a/trunk/arch/arm/kernel/entry-common.S b/trunk/arch/arm/kernel/entry-common.S
index fefd7f971437..3248cde504ed 100644
--- a/trunk/arch/arm/kernel/entry-common.S
+++ b/trunk/arch/arm/kernel/entry-common.S
@@ -276,13 +276,7 @@ ENDPROC(ftrace_graph_caller_old)
*/
.macro mcount_enter
-/*
- * This pad compensates for the push {lr} at the call site. Note that we are
- * unable to unwind through a function which does not otherwise save its lr.
- */
- UNWIND(.pad #4)
stmdb sp!, {r0-r3, lr}
- UNWIND(.save {r0-r3, lr})
.endm
.macro mcount_get_lr reg
@@ -295,7 +289,6 @@ ENDPROC(ftrace_graph_caller_old)
.endm
ENTRY(__gnu_mcount_nc)
-UNWIND(.fnstart)
#ifdef CONFIG_DYNAMIC_FTRACE
mov ip, lr
ldmia sp!, {lr}
@@ -303,22 +296,17 @@ UNWIND(.fnstart)
#else
__mcount
#endif
-UNWIND(.fnend)
ENDPROC(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
-UNWIND(.fnstart)
__ftrace_caller
-UNWIND(.fnend)
ENDPROC(ftrace_caller)
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
-UNWIND(.fnstart)
__ftrace_graph_caller
-UNWIND(.fnend)
ENDPROC(ftrace_graph_caller)
#endif
diff --git a/trunk/arch/arm/kernel/head.S b/trunk/arch/arm/kernel/head.S
index 8bac553fe213..e0eb9a1cae77 100644
--- a/trunk/arch/arm/kernel/head.S
+++ b/trunk/arch/arm/kernel/head.S
@@ -267,7 +267,7 @@ __create_page_tables:
addne r6, r6, #1 << SECTION_SHIFT
strne r6, [r3]
-#if defined(CONFIG_ARM_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
+#if defined(CONFIG_LPAE) && defined(CONFIG_CPU_ENDIAN_BE8)
sub r4, r4, #4 @ Fixup page table pointer
@ for 64-bit descriptors
#endif
diff --git a/trunk/arch/arm/kernel/hw_breakpoint.c b/trunk/arch/arm/kernel/hw_breakpoint.c
index 1fd749ee4a1b..96093b75ab90 100644
--- a/trunk/arch/arm/kernel/hw_breakpoint.c
+++ b/trunk/arch/arm/kernel/hw_breakpoint.c
@@ -966,7 +966,7 @@ static void reset_ctrl_regs(void *unused)
}
if (err) {
- pr_warn_once("CPU %d debug is powered down!\n", cpu);
+ pr_warning("CPU %d debug is powered down!\n", cpu);
cpumask_or(&debug_err_mask, &debug_err_mask, cpumask_of(cpu));
return;
}
@@ -987,7 +987,7 @@ static void reset_ctrl_regs(void *unused)
isb();
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
- pr_warn_once("CPU %d failed to disable vector catch\n", cpu);
+ pr_warning("CPU %d failed to disable vector catch\n", cpu);
return;
}
@@ -1007,7 +1007,7 @@ static void reset_ctrl_regs(void *unused)
}
if (cpumask_intersects(&debug_err_mask, cpumask_of(cpu))) {
- pr_warn_once("CPU %d failed to clear debug register pairs\n", cpu);
+ pr_warning("CPU %d failed to clear debug register pairs\n", cpu);
return;
}
@@ -1043,7 +1043,7 @@ static int dbg_cpu_pm_notify(struct notifier_block *self, unsigned long action,
return NOTIFY_OK;
}
-static struct notifier_block dbg_cpu_pm_nb = {
+static struct notifier_block __cpuinitdata dbg_cpu_pm_nb = {
.notifier_call = dbg_cpu_pm_notify,
};
diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c
index 8c3094d0f7b7..146157dfe27c 100644
--- a/trunk/arch/arm/kernel/perf_event.c
+++ b/trunk/arch/arm/kernel/perf_event.c
@@ -253,10 +253,7 @@ validate_event(struct pmu_hw_events *hw_events,
struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
struct pmu *leader_pmu = event->group_leader->pmu;
- if (event->pmu != leader_pmu || event->state < PERF_EVENT_STATE_OFF)
- return 1;
-
- if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
+ if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF)
return 1;
return armpmu->get_event_idx(hw_events, event) >= 0;
diff --git a/trunk/arch/arm/kernel/sched_clock.c b/trunk/arch/arm/kernel/sched_clock.c
index 59d2adb764a9..bd6f56b9ec21 100644
--- a/trunk/arch/arm/kernel/sched_clock.c
+++ b/trunk/arch/arm/kernel/sched_clock.c
@@ -45,12 +45,12 @@ static u32 notrace jiffy_sched_clock_read(void)
static u32 __read_mostly (*read_sched_clock)(void) = jiffy_sched_clock_read;
-static inline u64 notrace cyc_to_ns(u64 cyc, u32 mult, u32 shift)
+static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
return (cyc * mult) >> shift;
}
-static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
+static unsigned long long cyc_to_sched_clock(u32 cyc, u32 mask)
{
u64 epoch_ns;
u32 epoch_cyc;
diff --git a/trunk/arch/arm/kernel/setup.c b/trunk/arch/arm/kernel/setup.c
index 234e339196c0..3f6cbb2e3eda 100644
--- a/trunk/arch/arm/kernel/setup.c
+++ b/trunk/arch/arm/kernel/setup.c
@@ -56,6 +56,7 @@
#include
#include "atags.h"
+#include "tcm.h"
#if defined(CONFIG_FPE_NWFPE) || defined(CONFIG_FPE_FASTFPE)
@@ -352,23 +353,6 @@ void __init early_print(const char *str, ...)
printk("%s", buf);
}
-static void __init cpuid_init_hwcaps(void)
-{
- unsigned int divide_instrs;
-
- if (cpu_architecture() < CPU_ARCH_ARMv7)
- return;
-
- divide_instrs = (read_cpuid_ext(CPUID_EXT_ISAR0) & 0x0f000000) >> 24;
-
- switch (divide_instrs) {
- case 2:
- elf_hwcap |= HWCAP_IDIVA;
- case 1:
- elf_hwcap |= HWCAP_IDIVT;
- }
-}
-
static void __init feat_v6_fixup(void)
{
int id = read_cpuid_id();
@@ -499,11 +483,8 @@ static void __init setup_processor(void)
snprintf(elf_platform, ELF_PLATFORM_SIZE, "%s%c",
list->elf_name, ENDIANNESS);
elf_hwcap = list->elf_hwcap;
-
- cpuid_init_hwcaps();
-
#ifndef CONFIG_ARM_THUMB
- elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
+ elf_hwcap &= ~HWCAP_THUMB;
#endif
feat_v6_fixup();
@@ -543,7 +524,7 @@ int __init arm_add_memory(phys_addr_t start, phys_addr_t size)
size -= start & ~PAGE_MASK;
bank->start = PAGE_ALIGN(start);
-#ifndef CONFIG_ARM_LPAE
+#ifndef CONFIG_LPAE
if (bank->start + size < bank->start) {
printk(KERN_CRIT "Truncating memory at 0x%08llx to fit in "
"32-bit physical address space\n", (long long)start);
@@ -797,6 +778,8 @@ void __init setup_arch(char **cmdline_p)
reserve_crashkernel();
+ tcm_init();
+
#ifdef CONFIG_MULTI_IRQ_HANDLER
handle_arch_irq = mdesc->handle_irq;
#endif
diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c
index 1f2ccccaf009..79078edbb9bc 100644
--- a/trunk/arch/arm/kernel/smp.c
+++ b/trunk/arch/arm/kernel/smp.c
@@ -673,6 +673,9 @@ static int cpufreq_callback(struct notifier_block *nb,
if (freq->flags & CPUFREQ_CONST_LOOPS)
return NOTIFY_OK;
+ if (arm_delay_ops.const_clock)
+ return NOTIFY_OK;
+
if (!per_cpu(l_p_j_ref, cpu)) {
per_cpu(l_p_j_ref, cpu) =
per_cpu(cpu_data, cpu).loops_per_jiffy;
diff --git a/trunk/arch/arm/kernel/smp_tlb.c b/trunk/arch/arm/kernel/smp_tlb.c
index e82e1d248772..bd0300531399 100644
--- a/trunk/arch/arm/kernel/smp_tlb.c
+++ b/trunk/arch/arm/kernel/smp_tlb.c
@@ -12,7 +12,6 @@
#include
#include
-#include
/**********************************************************************/
@@ -70,72 +69,12 @@ static inline void ipi_flush_bp_all(void *ignored)
local_flush_bp_all();
}
-#ifdef CONFIG_ARM_ERRATA_798181
-static int erratum_a15_798181(void)
-{
- unsigned int midr = read_cpuid_id();
-
- /* Cortex-A15 r0p0..r3p2 affected */
- if ((midr & 0xff0ffff0) != 0x410fc0f0 || midr > 0x413fc0f2)
- return 0;
- return 1;
-}
-#else
-static int erratum_a15_798181(void)
-{
- return 0;
-}
-#endif
-
-static void ipi_flush_tlb_a15_erratum(void *arg)
-{
- dmb();
-}
-
-static void broadcast_tlb_a15_erratum(void)
-{
- if (!erratum_a15_798181())
- return;
-
- dummy_flush_tlb_a15_erratum();
- smp_call_function_many(cpu_online_mask, ipi_flush_tlb_a15_erratum,
- NULL, 1);
-}
-
-static void broadcast_tlb_mm_a15_erratum(struct mm_struct *mm)
-{
- int cpu;
- cpumask_t mask = { CPU_BITS_NONE };
-
- if (!erratum_a15_798181())
- return;
-
- dummy_flush_tlb_a15_erratum();
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- /*
- * We only need to send an IPI if the other CPUs are running
- * the same ASID as the one being invalidated. There is no
- * need for locking around the active_asids check since the
- * switch_mm() function has at least one dmb() (as required by
- * this workaround) in case a context switch happens on
- * another CPU after the condition below.
- */
- if (atomic64_read(&mm->context.id) ==
- atomic64_read(&per_cpu(active_asids, cpu)))
- cpumask_set_cpu(cpu, &mask);
- }
- smp_call_function_many(&mask, ipi_flush_tlb_a15_erratum, NULL, 1);
-}
-
void flush_tlb_all(void)
{
if (tlb_ops_need_broadcast())
on_each_cpu(ipi_flush_tlb_all, NULL, 1);
else
local_flush_tlb_all();
- broadcast_tlb_a15_erratum();
}
void flush_tlb_mm(struct mm_struct *mm)
@@ -144,7 +83,6 @@ void flush_tlb_mm(struct mm_struct *mm)
on_each_cpu_mask(mm_cpumask(mm), ipi_flush_tlb_mm, mm, 1);
else
local_flush_tlb_mm(mm);
- broadcast_tlb_mm_a15_erratum(mm);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
@@ -157,7 +95,6 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
&ta, 1);
} else
local_flush_tlb_page(vma, uaddr);
- broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
void flush_tlb_kernel_page(unsigned long kaddr)
@@ -168,7 +105,6 @@ void flush_tlb_kernel_page(unsigned long kaddr)
on_each_cpu(ipi_flush_tlb_kernel_page, &ta, 1);
} else
local_flush_tlb_kernel_page(kaddr);
- broadcast_tlb_a15_erratum();
}
void flush_tlb_range(struct vm_area_struct *vma,
@@ -183,7 +119,6 @@ void flush_tlb_range(struct vm_area_struct *vma,
&ta, 1);
} else
local_flush_tlb_range(vma, start, end);
- broadcast_tlb_mm_a15_erratum(vma->vm_mm);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
@@ -195,7 +130,6 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
} else
local_flush_tlb_kernel_range(start, end);
- broadcast_tlb_a15_erratum();
}
void flush_bp_all(void)
diff --git a/trunk/arch/arm/kernel/tcm.c b/trunk/arch/arm/kernel/tcm.c
index f50f19e5c138..30ae6bb4a310 100644
--- a/trunk/arch/arm/kernel/tcm.c
+++ b/trunk/arch/arm/kernel/tcm.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include "tcm.h"
static struct gen_pool *tcm_pool;
static bool dtcm_present;
diff --git a/trunk/arch/arm/mm/tcm.h b/trunk/arch/arm/kernel/tcm.h
similarity index 100%
rename from trunk/arch/arm/mm/tcm.h
rename to trunk/arch/arm/kernel/tcm.h
diff --git a/trunk/arch/arm/kvm/arm.c b/trunk/arch/arm/kvm/arm.c
index c1fe498983ac..5a936988eb24 100644
--- a/trunk/arch/arm/kvm/arm.c
+++ b/trunk/arch/arm/kvm/arm.c
@@ -201,7 +201,6 @@ int kvm_dev_ioctl_check_extension(long ext)
break;
case KVM_CAP_ARM_SET_DEVICE_ADDR:
r = 1;
- break;
case KVM_CAP_NR_VCPUS:
r = num_online_cpus();
break;
diff --git a/trunk/arch/arm/kvm/coproc.c b/trunk/arch/arm/kvm/coproc.c
index 7bed7556077a..4ea9a982269c 100644
--- a/trunk/arch/arm/kvm/coproc.c
+++ b/trunk/arch/arm/kvm/coproc.c
@@ -79,11 +79,11 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
u32 val;
int cpu;
+ cpu = get_cpu();
+
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
cpumask_setall(&vcpu->arch.require_dcache_flush);
cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
diff --git a/trunk/arch/arm/kvm/vgic.c b/trunk/arch/arm/kvm/vgic.c
index 0e4cfe123b38..c9a17316e9fe 100644
--- a/trunk/arch/arm/kvm/vgic.c
+++ b/trunk/arch/arm/kvm/vgic.c
@@ -883,7 +883,8 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
lr, irq, vgic_cpu->vgic_lr[lr]);
BUG_ON(!test_bit(lr, vgic_cpu->lr_used));
vgic_cpu->vgic_lr[lr] |= GICH_LR_PENDING_BIT;
- return true;
+
+ goto out;
}
/* Try to use another LR for this interrupt */
@@ -897,6 +898,7 @@ static bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq)
vgic_cpu->vgic_irq_lr_map[irq] = lr;
set_bit(lr, vgic_cpu->lr_used);
+out:
if (!vgic_irq_is_edge(vcpu, irq))
vgic_cpu->vgic_lr[lr] |= GICH_LR_EOI;
@@ -1016,6 +1018,21 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
kvm_debug("MISR = %08x\n", vgic_cpu->vgic_misr);
+ /*
+ * We do not need to take the distributor lock here, since the only
+ * action we perform is clearing the irq_active_bit for an EOIed
+ * level interrupt. There is a potential race with
+ * the queuing of an interrupt in __kvm_vgic_flush_hwstate(), where we
+ * check if the interrupt is already active. Two possibilities:
+ *
+ * - The queuing is occurring on the same vcpu: cannot happen,
+ * as we're already in the context of this vcpu, and
+ * executing the handler
+ * - The interrupt has been migrated to another vcpu, and we
+ * ignore this interrupt for this run. Big deal. It is still
+ * pending though, and will get considered when this vcpu
+ * exits.
+ */
if (vgic_cpu->vgic_misr & GICH_MISR_EOI) {
/*
* Some level interrupts have been EOIed. Clear their
@@ -1037,13 +1054,6 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
} else {
vgic_cpu_irq_clear(vcpu, irq);
}
-
- /*
- * Despite being EOIed, the LR may not have
- * been marked as empty.
- */
- set_bit(lr, (unsigned long *)vgic_cpu->vgic_elrsr);
- vgic_cpu->vgic_lr[lr] &= ~GICH_LR_ACTIVE_BIT;
}
}
@@ -1054,8 +1064,9 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
}
/*
- * Sync back the VGIC state after a guest run. The distributor lock is
- * needed so we don't get preempted in the middle of the state processing.
+ * Sync back the VGIC state after a guest run. We do not really touch
+ * the distributor here (the irq_pending_on_cpu bit is safe to set),
+ * so there is no need for taking its lock.
*/
static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
@@ -1101,14 +1112,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
- struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
if (!irqchip_in_kernel(vcpu->kvm))
return;
- spin_lock(&dist->lock);
__kvm_vgic_sync_hwstate(vcpu);
- spin_unlock(&dist->lock);
}
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
diff --git a/trunk/arch/arm/lib/delay.c b/trunk/arch/arm/lib/delay.c
index 64dbfa57204a..6b93f6a1a3c7 100644
--- a/trunk/arch/arm/lib/delay.c
+++ b/trunk/arch/arm/lib/delay.c
@@ -58,7 +58,7 @@ static void __timer_delay(unsigned long cycles)
static void __timer_const_udelay(unsigned long xloops)
{
unsigned long long loops = xloops;
- loops *= arm_delay_ops.ticks_per_jiffy;
+ loops *= loops_per_jiffy;
__timer_delay(loops >> UDELAY_SHIFT);
}
@@ -73,13 +73,11 @@ void __init register_current_timer_delay(const struct delay_timer *timer)
pr_info("Switching to timer-based delay loop\n");
delay_timer = timer;
lpj_fine = timer->freq / HZ;
-
- /* cpufreq may scale loops_per_jiffy, so keep a private copy */
- arm_delay_ops.ticks_per_jiffy = lpj_fine;
+ loops_per_jiffy = lpj_fine;
arm_delay_ops.delay = __timer_delay;
arm_delay_ops.const_udelay = __timer_const_udelay;
arm_delay_ops.udelay = __timer_udelay;
-
+ arm_delay_ops.const_clock = true;
delay_calibrated = true;
} else {
pr_info("Ignoring duplicate/late registration of read_current_timer delay\n");
diff --git a/trunk/arch/arm/mach-cns3xxx/core.c b/trunk/arch/arm/mach-cns3xxx/core.c
index 52e4bb5cf12d..e698f26cc0cb 100644
--- a/trunk/arch/arm/mach-cns3xxx/core.c
+++ b/trunk/arch/arm/mach-cns3xxx/core.c
@@ -22,9 +22,19 @@
static struct map_desc cns3xxx_io_desc[] __initdata = {
{
- .virtual = CNS3XXX_TC11MP_SCU_BASE_VIRT,
- .pfn = __phys_to_pfn(CNS3XXX_TC11MP_SCU_BASE),
- .length = SZ_8K,
+ .virtual = CNS3XXX_TC11MP_TWD_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_TWD_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_CPU_BASE),
+ .length = SZ_4K,
+ .type = MT_DEVICE,
+ }, {
+ .virtual = CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT,
+ .pfn = __phys_to_pfn(CNS3XXX_TC11MP_GIC_DIST_BASE),
+ .length = SZ_4K,
.type = MT_DEVICE,
}, {
.virtual = CNS3XXX_TIMER1_2_3_BASE_VIRT,
diff --git a/trunk/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h b/trunk/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
index b1021aafa481..191c8e57f289 100644
--- a/trunk/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
+++ b/trunk/arch/arm/mach-cns3xxx/include/mach/cns3xxx.h
@@ -94,10 +94,10 @@
#define RTC_INTR_STS_OFFSET 0x34
#define CNS3XXX_MISC_BASE 0x76000000 /* Misc Control */
-#define CNS3XXX_MISC_BASE_VIRT 0xFB000000 /* Misc Control */
+#define CNS3XXX_MISC_BASE_VIRT 0xFFF07000 /* Misc Control */
#define CNS3XXX_PM_BASE 0x77000000 /* Power Management Control */
-#define CNS3XXX_PM_BASE_VIRT 0xFB001000
+#define CNS3XXX_PM_BASE_VIRT 0xFFF08000
#define PM_CLK_GATE_OFFSET 0x00
#define PM_SOFT_RST_OFFSET 0x04
@@ -109,7 +109,7 @@
#define PM_PLL_HM_PD_OFFSET 0x1C
#define CNS3XXX_UART0_BASE 0x78000000 /* UART 0 */
-#define CNS3XXX_UART0_BASE_VIRT 0xFB002000
+#define CNS3XXX_UART0_BASE_VIRT 0xFFF09000
#define CNS3XXX_UART1_BASE 0x78400000 /* UART 1 */
#define CNS3XXX_UART1_BASE_VIRT 0xFFF0A000
@@ -130,7 +130,7 @@
#define CNS3XXX_I2S_BASE_VIRT 0xFFF10000
#define CNS3XXX_TIMER1_2_3_BASE 0x7C800000 /* Timer */
-#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFB003000
+#define CNS3XXX_TIMER1_2_3_BASE_VIRT 0xFFF10800
#define TIMER1_COUNTER_OFFSET 0x00
#define TIMER1_AUTO_RELOAD_OFFSET 0x04
@@ -227,16 +227,16 @@
* Testchip peripheral and fpga gic regions
*/
#define CNS3XXX_TC11MP_SCU_BASE 0x90000000 /* IRQ, Test chip */
-#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFB004000
+#define CNS3XXX_TC11MP_SCU_BASE_VIRT 0xFF000000
#define CNS3XXX_TC11MP_GIC_CPU_BASE 0x90000100 /* Test chip interrupt controller CPU interface */
-#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x100)
+#define CNS3XXX_TC11MP_GIC_CPU_BASE_VIRT 0xFF000100
#define CNS3XXX_TC11MP_TWD_BASE 0x90000600
-#define CNS3XXX_TC11MP_TWD_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x600)
+#define CNS3XXX_TC11MP_TWD_BASE_VIRT 0xFF000600
#define CNS3XXX_TC11MP_GIC_DIST_BASE 0x90001000 /* Test chip interrupt controller distributor */
-#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT (CNS3XXX_TC11MP_SCU_BASE_VIRT + 0x1000)
+#define CNS3XXX_TC11MP_GIC_DIST_BASE_VIRT 0xFF001000
#define CNS3XXX_TC11MP_L220_BASE 0x92002000 /* L220 registers */
#define CNS3XXX_TC11MP_L220_BASE_VIRT 0xFF002000
diff --git a/trunk/arch/arm/mach-ep93xx/include/mach/uncompress.h b/trunk/arch/arm/mach-ep93xx/include/mach/uncompress.h
index b5cc77d2380b..d2afb4dd82ab 100644
--- a/trunk/arch/arm/mach-ep93xx/include/mach/uncompress.h
+++ b/trunk/arch/arm/mach-ep93xx/include/mach/uncompress.h
@@ -47,13 +47,9 @@ static void __raw_writel(unsigned int value, unsigned int ptr)
static inline void putc(int c)
{
- int i;
-
- for (i = 0; i < 10000; i++) {
- /* Transmit fifo not full? */
- if (!(__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF))
- break;
- }
+ /* Transmit fifo not full? */
+ while (__raw_readb(PHYS_UART_FLAG) & UART_FLAG_TXFF)
+ ;
__raw_writeb(c, PHYS_UART_DATA);
}
diff --git a/trunk/arch/arm/mach-highbank/hotplug.c b/trunk/arch/arm/mach-highbank/hotplug.c
index 890cae23c12a..f30c52843396 100644
--- a/trunk/arch/arm/mach-highbank/hotplug.c
+++ b/trunk/arch/arm/mach-highbank/hotplug.c
@@ -28,11 +28,13 @@ extern void secondary_startup(void);
*/
void __ref highbank_cpu_die(unsigned int cpu)
{
- highbank_set_cpu_jump(cpu, phys_to_virt(0));
+ flush_cache_all();
- flush_cache_louis();
+ highbank_set_cpu_jump(cpu, phys_to_virt(0));
highbank_set_core_pwr();
- while (1)
- cpu_do_idle();
+ cpu_do_idle();
+
+ /* We should never return from idle */
+ panic("highbank: cpu %d unexpectedly exit from shutdown\n", cpu);
}
diff --git a/trunk/arch/arm/mach-imx/clk-imx35.c b/trunk/arch/arm/mach-imx/clk-imx35.c
index 2193c834f55c..e13a8fa5e62c 100644
--- a/trunk/arch/arm/mach-imx/clk-imx35.c
+++ b/trunk/arch/arm/mach-imx/clk-imx35.c
@@ -257,7 +257,6 @@ int __init mx35_clocks_init(void)
clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0");
clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
- clk_register_clkdev(clk[admux_gate], "audmux", NULL);
clk_prepare_enable(clk[spba_gate]);
clk_prepare_enable(clk[gpio1_gate]);
@@ -266,7 +265,6 @@ int __init mx35_clocks_init(void)
clk_prepare_enable(clk[iim_gate]);
clk_prepare_enable(clk[emi_gate]);
clk_prepare_enable(clk[max_gate]);
- clk_prepare_enable(clk[iomuxc_gate]);
/*
* SCC is needed to boot via mmc after a watchdog reset. The clock code
diff --git a/trunk/arch/arm/mach-imx/clk-imx6q.c b/trunk/arch/arm/mach-imx/clk-imx6q.c
index d38e54f5b6d7..2f9ff93a4e61 100644
--- a/trunk/arch/arm/mach-imx/clk-imx6q.c
+++ b/trunk/arch/arm/mach-imx/clk-imx6q.c
@@ -115,7 +115,7 @@ static const char *gpu2d_core_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd0_352m"
static const char *gpu3d_core_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
static const char *ipu_sels[] = { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
-static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_usb_otg", };
+static const char *ldb_di_sels[] = { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "mmdc_ch1_axi", "pll3_pfd1_540m", };
static const char *ipu_di_pre_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *ipu1_di0_sels[] = { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu1_di1_sels[] = { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
@@ -443,6 +443,7 @@ int __init mx6q_clocks_init(void)
clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
+ clk_register_clkdev(clk[twd], NULL, "smp_twd");
clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
clk_register_clkdev(clk[ahb], "ahb", NULL);
clk_register_clkdev(clk[cko1], "cko1", NULL);
diff --git a/trunk/arch/arm/mach-imx/common.h b/trunk/arch/arm/mach-imx/common.h
index 5bf4a97ab241..5a800bfcec5b 100644
--- a/trunk/arch/arm/mach-imx/common.h
+++ b/trunk/arch/arm/mach-imx/common.h
@@ -110,8 +110,6 @@ void tzic_handle_irq(struct pt_regs *);
extern void imx_enable_cpu(int cpu, bool enable);
extern void imx_set_cpu_jump(int cpu, void *jump_addr);
-extern u32 imx_get_cpu_arg(int cpu);
-extern void imx_set_cpu_arg(int cpu, u32 arg);
extern void v7_cpu_resume(void);
extern u32 *pl310_get_save_ptr(void);
#ifdef CONFIG_SMP
diff --git a/trunk/arch/arm/mach-imx/hotplug.c b/trunk/arch/arm/mach-imx/hotplug.c
index 361a253e2b63..7bc5fe15dda2 100644
--- a/trunk/arch/arm/mach-imx/hotplug.c
+++ b/trunk/arch/arm/mach-imx/hotplug.c
@@ -46,23 +46,11 @@ static inline void cpu_enter_lowpower(void)
void imx_cpu_die(unsigned int cpu)
{
cpu_enter_lowpower();
- /*
- * We use the cpu jumping argument register to sync with
- * imx_cpu_kill() which is running on cpu0 and waiting for
- * the register being cleared to kill the cpu.
- */
- imx_set_cpu_arg(cpu, ~0);
cpu_do_idle();
}
int imx_cpu_kill(unsigned int cpu)
{
- unsigned long timeout = jiffies + msecs_to_jiffies(50);
-
- while (imx_get_cpu_arg(cpu) == 0)
- if (time_after(jiffies, timeout))
- return 0;
imx_enable_cpu(cpu, false);
- imx_set_cpu_arg(cpu, 0);
return 1;
}
diff --git a/trunk/arch/arm/mach-imx/src.c b/trunk/arch/arm/mach-imx/src.c
index 09a742f8c7ab..e15f1555c59b 100644
--- a/trunk/arch/arm/mach-imx/src.c
+++ b/trunk/arch/arm/mach-imx/src.c
@@ -43,18 +43,6 @@ void imx_set_cpu_jump(int cpu, void *jump_addr)
src_base + SRC_GPR1 + cpu * 8);
}
-u32 imx_get_cpu_arg(int cpu)
-{
- cpu = cpu_logical_map(cpu);
- return readl_relaxed(src_base + SRC_GPR1 + cpu * 8 + 4);
-}
-
-void imx_set_cpu_arg(int cpu, u32 arg)
-{
- cpu = cpu_logical_map(cpu);
- writel_relaxed(arg, src_base + SRC_GPR1 + cpu * 8 + 4);
-}
-
void imx_src_prepare_restart(void)
{
u32 val;
diff --git a/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c b/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
index e5f70415905a..f655b2637b0e 100644
--- a/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
+++ b/trunk/arch/arm/mach-kirkwood/board-iomega_ix2_200.c
@@ -20,15 +20,10 @@ static struct mv643xx_eth_platform_data iomega_ix2_200_ge00_data = {
.duplex = DUPLEX_FULL,
};
-static struct mv643xx_eth_platform_data iomega_ix2_200_ge01_data = {
- .phy_addr = MV643XX_ETH_PHY_ADDR(11),
-};
-
void __init iomega_ix2_200_init(void)
{
/*
* Basic setup. Needs to be called early.
*/
- kirkwood_ge00_init(&iomega_ix2_200_ge00_data);
- kirkwood_ge01_init(&iomega_ix2_200_ge01_data);
+ kirkwood_ge01_init(&iomega_ix2_200_ge00_data);
}
diff --git a/trunk/arch/arm/mach-kirkwood/guruplug-setup.c b/trunk/arch/arm/mach-kirkwood/guruplug-setup.c
index 08dd739aa709..1c6e736cbbf8 100644
--- a/trunk/arch/arm/mach-kirkwood/guruplug-setup.c
+++ b/trunk/arch/arm/mach-kirkwood/guruplug-setup.c
@@ -53,8 +53,6 @@ static struct mv_sata_platform_data guruplug_sata_data = {
static struct mvsdio_platform_data guruplug_mvsdio_data = {
/* unfortunately the CD signal has not been connected */
- .gpio_card_detect = -1,
- .gpio_write_protect = -1,
};
static struct gpio_led guruplug_led_pins[] = {
diff --git a/trunk/arch/arm/mach-kirkwood/openrd-setup.c b/trunk/arch/arm/mach-kirkwood/openrd-setup.c
index 6a6eb548307d..8ddd69fdc937 100644
--- a/trunk/arch/arm/mach-kirkwood/openrd-setup.c
+++ b/trunk/arch/arm/mach-kirkwood/openrd-setup.c
@@ -55,7 +55,6 @@ static struct mv_sata_platform_data openrd_sata_data = {
static struct mvsdio_platform_data openrd_mvsdio_data = {
.gpio_card_detect = 29, /* MPP29 used as SD card detect */
- .gpio_write_protect = -1,
};
static unsigned int openrd_mpp_config[] __initdata = {
diff --git a/trunk/arch/arm/mach-kirkwood/rd88f6281-setup.c b/trunk/arch/arm/mach-kirkwood/rd88f6281-setup.c
index d24223166e06..c7d93b48926b 100644
--- a/trunk/arch/arm/mach-kirkwood/rd88f6281-setup.c
+++ b/trunk/arch/arm/mach-kirkwood/rd88f6281-setup.c
@@ -69,7 +69,6 @@ static struct mv_sata_platform_data rd88f6281_sata_data = {
static struct mvsdio_platform_data rd88f6281_mvsdio_data = {
.gpio_card_detect = 28,
- .gpio_write_protect = -1,
};
static unsigned int rd88f6281_mpp_config[] __initdata = {
diff --git a/trunk/arch/arm/mach-msm/timer.c b/trunk/arch/arm/mach-msm/timer.c
index f9fd77e8f1f5..2969027f02fa 100644
--- a/trunk/arch/arm/mach-msm/timer.c
+++ b/trunk/arch/arm/mach-msm/timer.c
@@ -62,10 +62,7 @@ static int msm_timer_set_next_event(unsigned long cycles,
{
u32 ctrl = readl_relaxed(event_base + TIMER_ENABLE);
- ctrl &= ~TIMER_ENABLE_EN;
- writel_relaxed(ctrl, event_base + TIMER_ENABLE);
-
- writel_relaxed(ctrl, event_base + TIMER_CLEAR);
+ writel_relaxed(0, event_base + TIMER_CLEAR);
writel_relaxed(cycles, event_base + TIMER_MATCH_VAL);
writel_relaxed(ctrl | TIMER_ENABLE_EN, event_base + TIMER_ENABLE);
return 0;
diff --git a/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c b/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
index d5970f5a1e8d..274ff58271de 100644
--- a/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
+++ b/trunk/arch/arm/mach-mvebu/irq-armada-370-xp.c
@@ -44,8 +44,6 @@
#define ARMADA_370_XP_MAX_PER_CPU_IRQS (28)
-#define ARMADA_370_XP_TIMER0_PER_CPU_IRQ (5)
-
#define ACTIVE_DOORBELLS (8)
static DEFINE_RAW_SPINLOCK(irq_controller_lock);
@@ -61,26 +59,36 @@ static struct irq_domain *armada_370_xp_mpic_domain;
*/
static void armada_370_xp_irq_mask(struct irq_data *d)
{
+#ifdef CONFIG_SMP
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+ if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
writel(hwirq, main_int_base +
ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
else
writel(hwirq, per_cpu_int_base +
ARMADA_370_XP_INT_SET_MASK_OFFS);
+#else
+ writel(irqd_to_hwirq(d),
+ per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+#endif
}
static void armada_370_xp_irq_unmask(struct irq_data *d)
{
+#ifdef CONFIG_SMP
irq_hw_number_t hwirq = irqd_to_hwirq(d);
- if (hwirq != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
+ if (hwirq > ARMADA_370_XP_MAX_PER_CPU_IRQS)
writel(hwirq, main_int_base +
ARMADA_370_XP_INT_SET_ENABLE_OFFS);
else
writel(hwirq, per_cpu_int_base +
ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#else
+ writel(irqd_to_hwirq(d),
+ per_cpu_int_base + ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
+#endif
}
#ifdef CONFIG_SMP
@@ -136,14 +144,10 @@ static int armada_370_xp_mpic_irq_map(struct irq_domain *h,
unsigned int virq, irq_hw_number_t hw)
{
armada_370_xp_irq_mask(irq_get_irq_data(virq));
- if (hw != ARMADA_370_XP_TIMER0_PER_CPU_IRQ)
- writel(hw, per_cpu_int_base +
- ARMADA_370_XP_INT_CLEAR_MASK_OFFS);
- else
- writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
+ writel(hw, main_int_base + ARMADA_370_XP_INT_SET_ENABLE_OFFS);
irq_set_status_flags(virq, IRQ_LEVEL);
- if (hw == ARMADA_370_XP_TIMER0_PER_CPU_IRQ) {
+ if (hw < ARMADA_370_XP_MAX_PER_CPU_IRQS) {
irq_set_percpu_devid(virq);
irq_set_chip_and_handler(virq, &armada_370_xp_irq_chip,
handle_percpu_devid_irq);
diff --git a/trunk/arch/arm/mach-omap1/clock_data.c b/trunk/arch/arm/mach-omap1/clock_data.c
index 6c4f766365a2..cb7c6ae2e3fc 100644
--- a/trunk/arch/arm/mach-omap1/clock_data.c
+++ b/trunk/arch/arm/mach-omap1/clock_data.c
@@ -538,6 +538,15 @@ static struct clk usb_hhc_ck16xx = {
};
static struct clk usb_dc_ck = {
+ .name = "usb_dc_ck",
+ .ops = &clkops_generic,
+ /* Direct from ULPD, no parent */
+ .rate = 48000000,
+ .enable_reg = OMAP1_IO_ADDRESS(SOFT_REQ_REG),
+ .enable_bit = USB_REQ_EN_SHIFT,
+};
+
+static struct clk usb_dc_ck7xx = {
.name = "usb_dc_ck",
.ops = &clkops_generic,
/* Direct from ULPD, no parent */
@@ -718,7 +727,8 @@ static struct omap_clk omap_clks[] = {
CLK(NULL, "usb_clko", &usb_clko, CK_16XX | CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck1510, CK_1510 | CK_310),
CLK(NULL, "usb_hhc_ck", &usb_hhc_ck16xx, CK_16XX),
- CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX | CK_7XX),
+ CLK(NULL, "usb_dc_ck", &usb_dc_ck, CK_16XX),
+ CLK(NULL, "usb_dc_ck", &usb_dc_ck7xx, CK_7XX),
CLK(NULL, "mclk", &mclk_1510, CK_1510 | CK_310),
CLK(NULL, "mclk", &mclk_16xx, CK_16XX),
CLK(NULL, "bclk", &bclk_1510, CK_1510 | CK_310),
diff --git a/trunk/arch/arm/mach-omap2/cclock44xx_data.c b/trunk/arch/arm/mach-omap2/cclock44xx_data.c
index 0c6834ae1fc4..3d58f335f173 100644
--- a/trunk/arch/arm/mach-omap2/cclock44xx_data.c
+++ b/trunk/arch/arm/mach-omap2/cclock44xx_data.c
@@ -52,13 +52,6 @@
*/
#define OMAP4_DPLL_ABE_DEFFREQ 98304000
-/*
- * OMAP4 USB DPLL default frequency. In OMAP4430 TRM version V, section
- * "3.6.3.9.5 DPLL_USB Preferred Settings" shows that the preferred
- * locked frequency for the USB DPLL is 960MHz.
- */
-#define OMAP4_DPLL_USB_DEFFREQ 960000000
-
/* Root clocks */
DEFINE_CLK_FIXED_RATE(extalt_clkin_ck, CLK_IS_ROOT, 59000000, 0x0);
@@ -1018,10 +1011,6 @@ DEFINE_CLK_OMAP_MUX(hsmmc2_fclk, "l3_init_clkdm", hsmmc1_fclk_sel,
OMAP4430_CM_L3INIT_MMC2_CLKCTRL, OMAP4430_CLKSEL_MASK,
hsmmc1_fclk_parents, func_dmic_abe_gfclk_ops);
-DEFINE_CLK_GATE(ocp2scp_usb_phy_phy_48m, "func_48m_fclk", &func_48m_fclk, 0x0,
- OMAP4430_CM_L3INIT_USBPHYOCP2SCP_CLKCTRL,
- OMAP4430_OPTFCLKEN_PHY_48M_SHIFT, 0x0, NULL);
-
DEFINE_CLK_GATE(sha2md5_fck, "l3_div_ck", &l3_div_ck, 0x0,
OMAP4430_CM_L4SEC_SHA2MD51_CLKCTRL,
OMAP4430_MODULEMODE_SWCTRL_SHIFT, 0x0, NULL);
@@ -1549,7 +1538,6 @@ static struct omap_clk omap44xx_clks[] = {
CLK(NULL, "per_mcbsp4_gfclk", &per_mcbsp4_gfclk, CK_443X),
CLK(NULL, "hsmmc1_fclk", &hsmmc1_fclk, CK_443X),
CLK(NULL, "hsmmc2_fclk", &hsmmc2_fclk, CK_443X),
- CLK(NULL, "ocp2scp_usb_phy_phy_48m", &ocp2scp_usb_phy_phy_48m, CK_443X),
CLK(NULL, "sha2md5_fck", &sha2md5_fck, CK_443X),
CLK(NULL, "slimbus1_fclk_1", &slimbus1_fclk_1, CK_443X),
CLK(NULL, "slimbus1_fclk_0", &slimbus1_fclk_0, CK_443X),
@@ -1717,13 +1705,5 @@ int __init omap4xxx_clk_init(void)
if (rc)
pr_err("%s: failed to configure ABE DPLL!\n", __func__);
- /*
- * Lock USB DPLL on OMAP4 devices so that the L3INIT power
- * domain can transition to retention state when not in use.
- */
- rc = clk_set_rate(&dpll_usb_ck, OMAP4_DPLL_USB_DEFFREQ);
- if (rc)
- pr_err("%s: failed to configure USB DPLL!\n", __func__);
-
return 0;
}
diff --git a/trunk/arch/arm/mach-omap2/common.h b/trunk/arch/arm/mach-omap2/common.h
index d6ba13e1c540..40f4a03d728f 100644
--- a/trunk/arch/arm/mach-omap2/common.h
+++ b/trunk/arch/arm/mach-omap2/common.h
@@ -293,8 +293,5 @@ extern void omap_reserve(void);
struct omap_hwmod;
extern int omap_dss_reset(struct omap_hwmod *);
-/* SoC specific clock initializer */
-extern int (*omap_clk_init)(void);
-
#endif /* __ASSEMBLER__ */
#endif /* __ARCH_ARM_MACH_OMAP2PLUS_COMMON_H */
diff --git a/trunk/arch/arm/mach-omap2/io.c b/trunk/arch/arm/mach-omap2/io.c
index 5c445ca1e271..2c3fdd65387b 100644
--- a/trunk/arch/arm/mach-omap2/io.c
+++ b/trunk/arch/arm/mach-omap2/io.c
@@ -54,12 +54,6 @@
#include "prm3xxx.h"
#include "prm44xx.h"
-/*
- * omap_clk_init: points to a function that does the SoC-specific
- * clock initializations
- */
-int (*omap_clk_init)(void);
-
/*
* The machine specific code may provide the extra mapping besides the
* default mapping provided here.
@@ -403,7 +397,7 @@ void __init omap2420_init_early(void)
omap242x_clockdomains_init();
omap2420_hwmod_init();
omap_hwmod_init_postsetup();
- omap_clk_init = omap2420_clk_init;
+ omap2420_clk_init();
}
void __init omap2420_init_late(void)
@@ -433,7 +427,7 @@ void __init omap2430_init_early(void)
omap243x_clockdomains_init();
omap2430_hwmod_init();
omap_hwmod_init_postsetup();
- omap_clk_init = omap2430_clk_init;
+ omap2430_clk_init();
}
void __init omap2430_init_late(void)
@@ -468,7 +462,7 @@ void __init omap3_init_early(void)
omap3xxx_clockdomains_init();
omap3xxx_hwmod_init();
omap_hwmod_init_postsetup();
- omap_clk_init = omap3xxx_clk_init;
+ omap3xxx_clk_init();
}
void __init omap3430_init_early(void)
@@ -506,7 +500,7 @@ void __init ti81xx_init_early(void)
omap3xxx_clockdomains_init();
omap3xxx_hwmod_init();
omap_hwmod_init_postsetup();
- omap_clk_init = omap3xxx_clk_init;
+ omap3xxx_clk_init();
}
void __init omap3_init_late(void)
@@ -574,7 +568,7 @@ void __init am33xx_init_early(void)
am33xx_clockdomains_init();
am33xx_hwmod_init();
omap_hwmod_init_postsetup();
- omap_clk_init = am33xx_clk_init;
+ am33xx_clk_init();
}
#endif
@@ -599,7 +593,7 @@ void __init omap4430_init_early(void)
omap44xx_clockdomains_init();
omap44xx_hwmod_init();
omap_hwmod_init_postsetup();
- omap_clk_init = omap4xxx_clk_init;
+ omap4xxx_clk_init();
}
void __init omap4430_init_late(void)
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.c b/trunk/arch/arm/mach-omap2/omap_hwmod.c
index a202a4785104..c2c798c08c2b 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod.c
@@ -1368,9 +1368,7 @@ static void _enable_sysc(struct omap_hwmod *oh)
}
if (sf & SYSC_HAS_MIDLEMODE) {
- if (oh->flags & HWMOD_FORCE_MSTANDBY) {
- idlemode = HWMOD_IDLEMODE_FORCE;
- } else if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
+ if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
idlemode = HWMOD_IDLEMODE_NO;
} else {
if (sf & SYSC_HAS_ENAWAKEUP)
@@ -1442,8 +1440,7 @@ static void _idle_sysc(struct omap_hwmod *oh)
}
if (sf & SYSC_HAS_MIDLEMODE) {
- if ((oh->flags & HWMOD_SWSUP_MSTANDBY) ||
- (oh->flags & HWMOD_FORCE_MSTANDBY)) {
+ if (oh->flags & HWMOD_SWSUP_MSTANDBY) {
idlemode = HWMOD_IDLEMODE_FORCE;
} else {
if (sf & SYSC_HAS_ENAWAKEUP)
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod.h b/trunk/arch/arm/mach-omap2/omap_hwmod.h
index d5dc935f6060..d43d9b608eda 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod.h
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod.h
@@ -427,8 +427,8 @@ struct omap_hwmod_omap4_prcm {
*
* HWMOD_SWSUP_SIDLE: omap_hwmod code should manually bring module in and out
* of idle, rather than relying on module smart-idle
- * HWMOD_SWSUP_MSTANDBY: omap_hwmod code should manually bring module in and
- * out of standby, rather than relying on module smart-standby
+ * HWMOD_SWSUP_MSTDBY: omap_hwmod code should manually bring module in and out
+ * of standby, rather than relying on module smart-standby
* HWMOD_INIT_NO_RESET: don't reset this module at boot - important for
* SDRAM controller, etc. XXX probably belongs outside the main hwmod file
* XXX Should be HWMOD_SETUP_NO_RESET
@@ -459,10 +459,6 @@ struct omap_hwmod_omap4_prcm {
* correctly, or this is being abused to deal with some PM latency
* issues -- but we're currently suffering from a shortage of
* folks who are able to track these issues down properly.
- * HWMOD_FORCE_MSTANDBY: Always keep MIDLEMODE bits cleared so that device
- * is kept in force-standby mode. Failing to do so causes PM problems
- * with musb on OMAP3630 at least. Note that musb has a dedicated register
- * to control MSTANDBY signal when MIDLEMODE is set to force-standby.
*/
#define HWMOD_SWSUP_SIDLE (1 << 0)
#define HWMOD_SWSUP_MSTANDBY (1 << 1)
@@ -475,7 +471,6 @@ struct omap_hwmod_omap4_prcm {
#define HWMOD_16BIT_REG (1 << 8)
#define HWMOD_EXT_OPT_MAIN_CLK (1 << 9)
#define HWMOD_BLOCK_WFI (1 << 10)
-#define HWMOD_FORCE_MSTANDBY (1 << 11)
/*
* omap_hwmod._int_flags definitions
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 5112d04e7b79..ac7e03ec952f 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1707,14 +1707,9 @@ static struct omap_hwmod omap3xxx_usbhsotg_hwmod = {
* Erratum ID: i479 idle_req / idle_ack mechanism potentially
* broken when autoidle is enabled
* workaround is to disable the autoidle bit at module level.
- *
- * Enabling the device in any other MIDLEMODE setting but force-idle
- * causes core_pwrdm not enter idle states at least on OMAP3630.
- * Note that musb has OTG_FORCESTDBY register that controls MSTANDBY
- * signal when MIDLEMODE is set to force-idle.
*/
.flags = HWMOD_NO_OCP_AUTOIDLE | HWMOD_SWSUP_SIDLE
- | HWMOD_FORCE_MSTANDBY,
+ | HWMOD_SWSUP_MSTANDBY,
};
/* usb_otg_hs */
diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 9e0576569e07..0e47d2e1687c 100644
--- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2714,10 +2714,6 @@ static struct omap_ocp2scp_dev ocp2scp_dev_attr[] = {
{ }
};
-static struct omap_hwmod_opt_clk ocp2scp_usb_phy_opt_clks[] = {
- { .role = "48mhz", .clk = "ocp2scp_usb_phy_phy_48m" },
-};
-
/* ocp2scp_usb_phy */
static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
.name = "ocp2scp_usb_phy",
@@ -2732,8 +2728,6 @@ static struct omap_hwmod omap44xx_ocp2scp_usb_phy_hwmod = {
},
},
.dev_attr = ocp2scp_dev_attr,
- .opt_clks = ocp2scp_usb_phy_opt_clks,
- .opt_clks_cnt = ARRAY_SIZE(ocp2scp_usb_phy_opt_clks),
};
/*
diff --git a/trunk/arch/arm/mach-omap2/timer.c b/trunk/arch/arm/mach-omap2/timer.c
index f62b509ed08d..2bdd4cf17a8f 100644
--- a/trunk/arch/arm/mach-omap2/timer.c
+++ b/trunk/arch/arm/mach-omap2/timer.c
@@ -547,8 +547,6 @@ static inline void __init realtime_counter_init(void)
clksrc_nr, clksrc_src) \
void __init omap##name##_gptimer_timer_init(void) \
{ \
- if (omap_clk_init) \
- omap_clk_init(); \
omap_dmtimer_init(); \
omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
omap2_gptimer_clocksource_init((clksrc_nr), clksrc_src); \
@@ -558,8 +556,6 @@ void __init omap##name##_gptimer_timer_init(void) \
clksrc_nr, clksrc_src) \
void __init omap##name##_sync32k_timer_init(void) \
{ \
- if (omap_clk_init) \
- omap_clk_init(); \
omap_dmtimer_init(); \
omap2_gp_clockevent_init((clkev_nr), clkev_src, clkev_prop); \
/* Enable the use of clocksource="gp_timer" kernel parameter */ \
diff --git a/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h b/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
index 1e73f5fa8659..b7a9f4d469e8 100644
--- a/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/trunk/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -188,8 +188,10 @@
#if defined(CONFIG_CPU_S3C2416)
#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
+#elif defined(CONFIG_CPU_S3C2443)
+#define NR_IRQS (IRQ_S3C2443_AC97+1)
#else
-#define NR_IRQS (IRQ_S3C2443_AC97 + 1)
+#define NR_IRQS (IRQ_S3C2440_AC97+1)
#endif
/* compatibility define. */
diff --git a/trunk/arch/arm/mach-s3c24xx/irq.c b/trunk/arch/arm/mach-s3c24xx/irq.c
index d8ba9bee4c7e..cb9f5e011e73 100644
--- a/trunk/arch/arm/mach-s3c24xx/irq.c
+++ b/trunk/arch/arm/mach-s3c24xx/irq.c
@@ -500,7 +500,7 @@ struct s3c_irq_intc *s3c24xx_init_intc(struct device_node *np,
base = (void *)0xfd000000;
intc->reg_mask = base + 0xa4;
- intc->reg_pending = base + 0xa8;
+ intc->reg_pending = base + 0x08;
irq_num = 20;
irq_start = S3C2410_IRQ(32);
irq_offset = 4;
diff --git a/trunk/arch/arm/mach-ux500/board-mop500-sdi.c b/trunk/arch/arm/mach-ux500/board-mop500-sdi.c
index 7f2cb6c5e2c1..051b62c27102 100644
--- a/trunk/arch/arm/mach-ux500/board-mop500-sdi.c
+++ b/trunk/arch/arm/mach-ux500/board-mop500-sdi.c
@@ -81,6 +81,7 @@ static struct stedma40_chan_cfg mop500_sdi0_dma_cfg_tx = {
#endif
struct mmci_platform_data mop500_sdi0_data = {
+ .ios_handler = mop500_sdi0_ios_handler,
.ocr_mask = MMC_VDD_29_30,
.f_max = 50000000,
.capabilities = MMC_CAP_4_BIT_DATA |
diff --git a/trunk/arch/arm/mach-ux500/board-mop500.c b/trunk/arch/arm/mach-ux500/board-mop500.c
index 87d2d7b38ce9..b03457881c4b 100644
--- a/trunk/arch/arm/mach-ux500/board-mop500.c
+++ b/trunk/arch/arm/mach-ux500/board-mop500.c
@@ -12,7 +12,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -440,15 +439,6 @@ static void mop500_prox_deactivate(struct device *dev)
regulator_put(prox_regulator);
}
-void mop500_snowball_ethernet_clock_enable(void)
-{
- struct clk *clk;
-
- clk = clk_get_sys("fsmc", NULL);
- if (!IS_ERR(clk))
- clk_prepare_enable(clk);
-}
-
static struct cryp_platform_data u8500_cryp1_platform_data = {
.mem_to_engine = {
.dir = STEDMA40_MEM_TO_PERIPH,
@@ -693,8 +683,6 @@ static void __init snowball_init_machine(void)
mop500_audio_init(parent);
mop500_uart_init(parent);
- mop500_snowball_ethernet_clock_enable();
-
/* This board has full regulator constraints */
regulator_has_full_constraints();
}
diff --git a/trunk/arch/arm/mach-ux500/board-mop500.h b/trunk/arch/arm/mach-ux500/board-mop500.h
index d38951be70df..eaa605f5d90d 100644
--- a/trunk/arch/arm/mach-ux500/board-mop500.h
+++ b/trunk/arch/arm/mach-ux500/board-mop500.h
@@ -104,7 +104,6 @@ void __init mop500_pinmaps_init(void);
void __init snowball_pinmaps_init(void);
void __init hrefv60_pinmaps_init(void);
void mop500_audio_init(struct device *parent);
-void mop500_snowball_ethernet_clock_enable(void);
int __init mop500_uib_init(void);
void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
diff --git a/trunk/arch/arm/mach-ux500/cpu-db8500.c b/trunk/arch/arm/mach-ux500/cpu-db8500.c
index f1a581844372..19235cf7bbe3 100644
--- a/trunk/arch/arm/mach-ux500/cpu-db8500.c
+++ b/trunk/arch/arm/mach-ux500/cpu-db8500.c
@@ -312,10 +312,9 @@ static void __init u8500_init_machine(void)
/* Pinmaps must be in place before devices register */
if (of_machine_is_compatible("st-ericsson,mop500"))
mop500_pinmaps_init();
- else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
+ else if (of_machine_is_compatible("calaosystems,snowball-a9500"))
snowball_pinmaps_init();
- mop500_snowball_ethernet_clock_enable();
- } else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
+ else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
hrefv60_pinmaps_init();
else if (of_machine_is_compatible("st-ericsson,ccu9540")) {}
/* TODO: Add pinmaps for ccu9540 board. */
diff --git a/trunk/arch/arm/mm/Kconfig b/trunk/arch/arm/mm/Kconfig
index 4045c4931a30..025d17328730 100644
--- a/trunk/arch/arm/mm/Kconfig
+++ b/trunk/arch/arm/mm/Kconfig
@@ -43,7 +43,7 @@ config CPU_ARM740T
depends on !MMU
select CPU_32v4T
select CPU_ABRT_LV4T
- select CPU_CACHE_V4
+ select CPU_CACHE_V3 # although the core is v4t
select CPU_CP15_MPU
select CPU_PABRT_LEGACY
help
@@ -469,6 +469,9 @@ config CPU_PABRT_V7
bool
# The cache model
+config CPU_CACHE_V3
+ bool
+
config CPU_CACHE_V4
bool
diff --git a/trunk/arch/arm/mm/Makefile b/trunk/arch/arm/mm/Makefile
index 9e51be96f635..4e333fa2756f 100644
--- a/trunk/arch/arm/mm/Makefile
+++ b/trunk/arch/arm/mm/Makefile
@@ -33,6 +33,7 @@ obj-$(CONFIG_CPU_PABRT_LEGACY) += pabort-legacy.o
obj-$(CONFIG_CPU_PABRT_V6) += pabort-v6.o
obj-$(CONFIG_CPU_PABRT_V7) += pabort-v7.o
+obj-$(CONFIG_CPU_CACHE_V3) += cache-v3.o
obj-$(CONFIG_CPU_CACHE_V4) += cache-v4.o
obj-$(CONFIG_CPU_CACHE_V4WT) += cache-v4wt.o
obj-$(CONFIG_CPU_CACHE_V4WB) += cache-v4wb.o
diff --git a/trunk/arch/arm/mm/cache-feroceon-l2.c b/trunk/arch/arm/mm/cache-feroceon-l2.c
index 48bc3c0a87ce..dd3d59122cc3 100644
--- a/trunk/arch/arm/mm/cache-feroceon-l2.c
+++ b/trunk/arch/arm/mm/cache-feroceon-l2.c
@@ -343,7 +343,6 @@ void __init feroceon_l2_init(int __l2_wt_override)
outer_cache.inv_range = feroceon_l2_inv_range;
outer_cache.clean_range = feroceon_l2_clean_range;
outer_cache.flush_range = feroceon_l2_flush_range;
- outer_cache.inv_all = l2_inv_all;
enable_l2();
diff --git a/trunk/arch/arm/mm/cache-l2x0.c b/trunk/arch/arm/mm/cache-l2x0.c
index c465faca51b0..c2f37390308a 100644
--- a/trunk/arch/arm/mm/cache-l2x0.c
+++ b/trunk/arch/arm/mm/cache-l2x0.c
@@ -299,7 +299,7 @@ static void l2x0_unlock(u32 cache_id)
int lockregs;
int i;
- switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ switch (cache_id) {
case L2X0_CACHE_ID_PART_L310:
lockregs = 8;
break;
@@ -333,14 +333,15 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
- cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
+ cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID)
+ & L2X0_CACHE_ID_PART_MASK;
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
/* Determine the number of ways */
- switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
+ switch (cache_id) {
case L2X0_CACHE_ID_PART_L310:
if (aux & (1 << 16))
ways = 16;
@@ -724,6 +725,7 @@ static const struct l2x0_of_data pl310_data = {
.flush_all = l2x0_flush_all,
.inv_all = l2x0_inv_all,
.disable = l2x0_disable,
+ .set_debug = pl310_set_debug,
},
};
@@ -812,9 +814,10 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
data->save();
of_init = true;
- memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
l2x0_init(l2x0_base, aux_val, aux_mask);
+ memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
+
return 0;
}
#endif
diff --git a/trunk/arch/arm/mm/cache-v3.S b/trunk/arch/arm/mm/cache-v3.S
new file mode 100644
index 000000000000..8a3fadece8d3
--- /dev/null
+++ b/trunk/arch/arm/mm/cache-v3.S
@@ -0,0 +1,137 @@
+/*
+ * linux/arch/arm/mm/cache-v3.S
+ *
+ * Copyright (C) 1997-2002 Russell king
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include
+#include
+#include
+#include "proc-macros.S"
+
+/*
+ * flush_icache_all()
+ *
+ * Unconditionally clean and invalidate the entire icache.
+ */
+ENTRY(v3_flush_icache_all)
+ mov pc, lr
+ENDPROC(v3_flush_icache_all)
+
+/*
+ * flush_user_cache_all()
+ *
+ * Invalidate all cache entries in a particular address
+ * space.
+ *
+ * - mm - mm_struct describing address space
+ */
+ENTRY(v3_flush_user_cache_all)
+ /* FALLTHROUGH */
+/*
+ * flush_kern_cache_all()
+ *
+ * Clean and invalidate the entire cache.
+ */
+ENTRY(v3_flush_kern_cache_all)
+ /* FALLTHROUGH */
+
+/*
+ * flush_user_cache_range(start, end, flags)
+ *
+ * Invalidate a range of cache entries in the specified
+ * address space.
+ *
+ * - start - start address (may not be aligned)
+ * - end - end address (exclusive, may not be aligned)
+ * - flags - vma_area_struct flags describing address space
+ */
+ENTRY(v3_flush_user_cache_range)
+ mov ip, #0
+ mcreq p15, 0, ip, c7, c0, 0 @ flush ID cache
+ mov pc, lr
+
+/*
+ * coherent_kern_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_coherent_kern_range)
+ /* FALLTHROUGH */
+
+/*
+ * coherent_user_range(start, end)
+ *
+ * Ensure coherency between the Icache and the Dcache in the
+ * region described by start. If you have non-snooping
+ * Harvard caches, you need to implement this function.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_coherent_user_range)
+ mov r0, #0
+ mov pc, lr
+
+/*
+ * flush_kern_dcache_area(void *page, size_t size)
+ *
+ * Ensure no D cache aliasing occurs, either with itself or
+ * the I cache
+ *
+ * - addr - kernel address
+ * - size - region size
+ */
+ENTRY(v3_flush_kern_dcache_area)
+ /* FALLTHROUGH */
+
+/*
+ * dma_flush_range(start, end)
+ *
+ * Clean and invalidate the specified virtual address range.
+ *
+ * - start - virtual start address
+ * - end - virtual end address
+ */
+ENTRY(v3_dma_flush_range)
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 0 @ flush ID cache
+ mov pc, lr
+
+/*
+ * dma_unmap_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v3_dma_unmap_area)
+ teq r2, #DMA_TO_DEVICE
+ bne v3_dma_flush_range
+ /* FALLTHROUGH */
+
+/*
+ * dma_map_area(start, size, dir)
+ * - start - kernel virtual start address
+ * - size - size of region
+ * - dir - DMA direction
+ */
+ENTRY(v3_dma_map_area)
+ mov pc, lr
+ENDPROC(v3_dma_unmap_area)
+ENDPROC(v3_dma_map_area)
+
+ .globl v3_flush_kern_cache_louis
+ .equ v3_flush_kern_cache_louis, v3_flush_kern_cache_all
+
+ __INITDATA
+
+ @ define struct cpu_cache_fns (see and proc-macros.S)
+ define_cache_functions v3
diff --git a/trunk/arch/arm/mm/cache-v4.S b/trunk/arch/arm/mm/cache-v4.S
index a7ba68f59f0c..43e5d77be677 100644
--- a/trunk/arch/arm/mm/cache-v4.S
+++ b/trunk/arch/arm/mm/cache-v4.S
@@ -58,7 +58,7 @@ ENTRY(v4_flush_kern_cache_all)
ENTRY(v4_flush_user_cache_range)
#ifdef CONFIG_CPU_CP15
mov ip, #0
- mcr p15, 0, ip, c7, c7, 0 @ flush ID cache
+ mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
mov pc, lr
#else
/* FALLTHROUGH */
diff --git a/trunk/arch/arm/mm/context.c b/trunk/arch/arm/mm/context.c
index 2ac37372ef52..a5a4b2bc42ba 100644
--- a/trunk/arch/arm/mm/context.c
+++ b/trunk/arch/arm/mm/context.c
@@ -48,7 +48,7 @@ static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
-DEFINE_PER_CPU(atomic64_t, active_asids);
+static DEFINE_PER_CPU(atomic64_t, active_asids);
static DEFINE_PER_CPU(u64, reserved_asids);
static cpumask_t tlb_flush_pending;
@@ -215,7 +215,6 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
local_flush_bp_all();
local_flush_tlb_all();
- dummy_flush_tlb_a15_erratum();
}
atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c
index a84ff763ac39..e95a996ab78f 100644
--- a/trunk/arch/arm/mm/mmu.c
+++ b/trunk/arch/arm/mm/mmu.c
@@ -34,7 +34,6 @@
#include
#include "mm.h"
-#include "tcm.h"
/*
* empty_zero_page is a special page that is used for
@@ -599,60 +598,39 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
} while (pte++, addr += PAGE_SIZE, addr != end);
}
-static void __init map_init_section(pmd_t *pmd, unsigned long addr,
- unsigned long end, phys_addr_t phys,
- const struct mem_type *type)
-{
-#ifndef CONFIG_ARM_LPAE
- /*
- * In classic MMU format, puds and pmds are folded in to
- * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
- * group of L1 entries making up one logical pointer to
- * an L2 table (2MB), where as PMDs refer to the individual
- * L1 entries (1MB). Hence increment to get the correct
- * offset for odd 1MB sections.
- * (See arch/arm/include/asm/pgtable-2level.h)
- */
- if (addr & SECTION_SIZE)
- pmd++;
-#endif
- do {
- *pmd = __pmd(phys | type->prot_sect);
- phys += SECTION_SIZE;
- } while (pmd++, addr += SECTION_SIZE, addr != end);
-
- flush_pmd_entry(pmd);
-}
-
-static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
+static void __init alloc_init_section(pud_t *pud, unsigned long addr,
unsigned long end, phys_addr_t phys,
const struct mem_type *type)
{
pmd_t *pmd = pmd_offset(pud, addr);
- unsigned long next;
- do {
- /*
- * With LPAE, we must loop over to map
- * all the pmds for the given range.
- */
- next = pmd_addr_end(addr, end);
+ /*
+ * Try a section mapping - end, addr and phys must all be aligned
+ * to a section boundary. Note that PMDs refer to the individual
+ * L1 entries, whereas PGDs refer to a group of L1 entries making
+ * up one logical pointer to an L2 table.
+ */
+ if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
+ pmd_t *p = pmd;
- /*
- * Try a section mapping - addr, next and phys must all be
- * aligned to a section boundary.
- */
- if (type->prot_sect &&
- ((addr | next | phys) & ~SECTION_MASK) == 0) {
- map_init_section(pmd, addr, next, phys, type);
- } else {
- alloc_init_pte(pmd, addr, next,
- __phys_to_pfn(phys), type);
- }
+#ifndef CONFIG_ARM_LPAE
+ if (addr & SECTION_SIZE)
+ pmd++;
+#endif
- phys += next - addr;
+ do {
+ *pmd = __pmd(phys | type->prot_sect);
+ phys += SECTION_SIZE;
+ } while (pmd++, addr += SECTION_SIZE, addr != end);
- } while (pmd++, addr = next, addr != end);
+ flush_pmd_entry(p);
+ } else {
+ /*
+ * No need to loop; pte's aren't interested in the
+ * individual L1 entries.
+ */
+ alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
+ }
}
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
@@ -663,7 +641,7 @@ static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
do {
next = pud_addr_end(addr, end);
- alloc_init_pmd(pud, addr, next, phys, type);
+ alloc_init_section(pud, addr, next, phys, type);
phys += next - addr;
} while (pud++, addr = next, addr != end);
}
@@ -1278,7 +1256,6 @@ void __init paging_init(struct machine_desc *mdesc)
dma_contiguous_remap();
devicemaps_init(mdesc);
kmap_init();
- tcm_init();
top_pmd = pmd_off_k(0xffff0000);
diff --git a/trunk/arch/arm/mm/proc-arm740.S b/trunk/arch/arm/mm/proc-arm740.S
index fde2d2a794cf..dc5de5d53f20 100644
--- a/trunk/arch/arm/mm/proc-arm740.S
+++ b/trunk/arch/arm/mm/proc-arm740.S
@@ -77,27 +77,24 @@ __arm740_setup:
mcr p15, 0, r0, c6, c0 @ set area 0, default
ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
- ldr r3, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
- mov r4, #10 @ 11 is the minimum (4KB)
-1: add r4, r4, #1 @ area size *= 2
- movs r3, r3, lsr #1
+ ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
+ mov r2, #10 @ 11 is the minimum (4KB)
+1: add r2, r2, #1 @ area size *= 2
+ mov r1, r1, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r4, lsl #1 @ the area register value
+ orr r0, r0, r2, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
mcr p15, 0, r0, c6, c1 @ set area 1, RAM
ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
- ldr r3, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
- cmp r3, #0
- moveq r0, #0
- beq 2f
- mov r4, #10 @ 11 is the minimum (4KB)
-1: add r4, r4, #1 @ area size *= 2
- movs r3, r3, lsr #1
+ ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
+ mov r2, #10 @ 11 is the minimum (4KB)
+1: add r2, r2, #1 @ area size *= 2
+ mov r1, r1, lsr #1
bne 1b @ count not zero r-shift
- orr r0, r0, r4, lsl #1 @ the area register value
+ orr r0, r0, r2, lsl #1 @ the area register value
orr r0, r0, #1 @ set enable bit
-2: mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
+ mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
mov r0, #0x06
mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
@@ -140,14 +137,13 @@ __arm740_proc_info:
.long 0x41807400
.long 0xfffffff0
.long 0
- .long 0
b __arm740_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_26BIT
+ .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
.long cpu_arm740_name
.long arm740_processor_functions
.long 0
.long 0
- .long v4_cache_fns @ cache model
+ .long v3_cache_fns @ cache model
.size __arm740_proc_info, . - __arm740_proc_info
diff --git a/trunk/arch/arm/mm/proc-arm920.S b/trunk/arch/arm/mm/proc-arm920.S
index 2556cf1c2da1..2c3b9421ab5e 100644
--- a/trunk/arch/arm/mm/proc-arm920.S
+++ b/trunk/arch/arm/mm/proc-arm920.S
@@ -387,7 +387,7 @@ ENTRY(cpu_arm920_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm920_suspend_size
.equ cpu_arm920_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm920_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/trunk/arch/arm/mm/proc-arm926.S b/trunk/arch/arm/mm/proc-arm926.S
index 344c8a548cc0..f1803f7e2972 100644
--- a/trunk/arch/arm/mm/proc-arm926.S
+++ b/trunk/arch/arm/mm/proc-arm926.S
@@ -402,7 +402,7 @@ ENTRY(cpu_arm926_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */
.globl cpu_arm926_suspend_size
.equ cpu_arm926_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_arm926_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c13, c0, 0 @ PID
diff --git a/trunk/arch/arm/mm/proc-mohawk.S b/trunk/arch/arm/mm/proc-mohawk.S
index 0b60dd3d742a..82f9cdc751d6 100644
--- a/trunk/arch/arm/mm/proc-mohawk.S
+++ b/trunk/arch/arm/mm/proc-mohawk.S
@@ -350,7 +350,7 @@ ENTRY(cpu_mohawk_set_pte_ext)
.globl cpu_mohawk_suspend_size
.equ cpu_mohawk_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_mohawk_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/mm/proc-sa1100.S b/trunk/arch/arm/mm/proc-sa1100.S
index d92dfd081429..3aa0da11fd84 100644
--- a/trunk/arch/arm/mm/proc-sa1100.S
+++ b/trunk/arch/arm/mm/proc-sa1100.S
@@ -172,7 +172,7 @@ ENTRY(cpu_sa1100_set_pte_ext)
.globl cpu_sa1100_suspend_size
.equ cpu_sa1100_suspend_size, 4 * 3
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_sa1100_do_suspend)
stmfd sp!, {r4 - r6, lr}
mrc p15, 0, r4, c3, c0, 0 @ domain ID
diff --git a/trunk/arch/arm/mm/proc-syms.c b/trunk/arch/arm/mm/proc-syms.c
index 054b491ff764..3e6210b4d6d4 100644
--- a/trunk/arch/arm/mm/proc-syms.c
+++ b/trunk/arch/arm/mm/proc-syms.c
@@ -17,9 +17,7 @@
#ifndef MULTI_CPU
EXPORT_SYMBOL(cpu_dcache_clean_area);
-#ifdef CONFIG_MMU
EXPORT_SYMBOL(cpu_set_pte_ext);
-#endif
#else
EXPORT_SYMBOL(processor);
#endif
diff --git a/trunk/arch/arm/mm/proc-v6.S b/trunk/arch/arm/mm/proc-v6.S
index 5c07ee4fe3eb..bcaaa8de9325 100644
--- a/trunk/arch/arm/mm/proc-v6.S
+++ b/trunk/arch/arm/mm/proc-v6.S
@@ -138,7 +138,7 @@ ENTRY(cpu_v6_set_pte_ext)
/* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */
.globl cpu_v6_suspend_size
.equ cpu_v6_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_v6_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID
diff --git a/trunk/arch/arm/mm/proc-v7.S b/trunk/arch/arm/mm/proc-v7.S
index f584d3f5b37c..3a3c015f8d5c 100644
--- a/trunk/arch/arm/mm/proc-v7.S
+++ b/trunk/arch/arm/mm/proc-v7.S
@@ -420,7 +420,7 @@ __v7_pj4b_proc_info:
__v7_ca7mp_proc_info:
.long 0x410fc070
.long 0xff0ffff0
- __v7_proc __v7_ca7mp_setup
+ __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
.size __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
/*
@@ -430,24 +430,9 @@ __v7_ca7mp_proc_info:
__v7_ca15mp_proc_info:
.long 0x410fc0f0
.long 0xff0ffff0
- __v7_proc __v7_ca15mp_setup
+ __v7_proc __v7_ca15mp_setup, hwcaps = HWCAP_IDIV
.size __v7_ca15mp_proc_info, . - __v7_ca15mp_proc_info
- /*
- * Qualcomm Inc. Krait processors.
- */
- .type __krait_proc_info, #object
-__krait_proc_info:
- .long 0x510f0400 @ Required ID value
- .long 0xff0ffc00 @ Mask for ID
- /*
- * Some Krait processors don't indicate support for SDIV and UDIV
- * instructions in the ARM instruction set, even though they actually
- * do support them.
- */
- __v7_proc __v7_setup, hwcaps = HWCAP_IDIV
- .size __krait_proc_info, . - __krait_proc_info
-
/*
* Match any ARMv7 processor core.
*/
diff --git a/trunk/arch/arm/mm/proc-xsc3.S b/trunk/arch/arm/mm/proc-xsc3.S
index e8efd83b6f25..eb93d6487f35 100644
--- a/trunk/arch/arm/mm/proc-xsc3.S
+++ b/trunk/arch/arm/mm/proc-xsc3.S
@@ -413,7 +413,7 @@ ENTRY(cpu_xsc3_set_pte_ext)
.globl cpu_xsc3_suspend_size
.equ cpu_xsc3_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xsc3_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/arm/mm/proc-xscale.S b/trunk/arch/arm/mm/proc-xscale.S
index e766f889bfd6..25510361aa18 100644
--- a/trunk/arch/arm/mm/proc-xscale.S
+++ b/trunk/arch/arm/mm/proc-xscale.S
@@ -528,7 +528,7 @@ ENTRY(cpu_xscale_set_pte_ext)
.globl cpu_xscale_suspend_size
.equ cpu_xscale_suspend_size, 4 * 6
-#ifdef CONFIG_ARM_CPU_SUSPEND
+#ifdef CONFIG_PM_SLEEP
ENTRY(cpu_xscale_do_suspend)
stmfd sp!, {r4 - r9, lr}
mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode
diff --git a/trunk/arch/avr32/include/asm/io.h b/trunk/arch/avr32/include/asm/io.h
index fc6483f83ccc..cf60d0a9f176 100644
--- a/trunk/arch/avr32/include/asm/io.h
+++ b/trunk/arch/avr32/include/asm/io.h
@@ -165,10 +165,6 @@ BUILDIO_IOPORT(l, u32)
#define readw_be __raw_readw
#define readl_be __raw_readl
-#define writeb_relaxed writeb
-#define writew_relaxed writew
-#define writel_relaxed writel
-
#define writeb_be __raw_writeb
#define writew_be __raw_writew
#define writel_be __raw_writel
diff --git a/trunk/arch/c6x/include/asm/irqflags.h b/trunk/arch/c6x/include/asm/irqflags.h
index 2c71d5634ec2..cf78e09e18c3 100644
--- a/trunk/arch/c6x/include/asm/irqflags.h
+++ b/trunk/arch/c6x/include/asm/irqflags.h
@@ -27,7 +27,7 @@ static inline unsigned long arch_local_save_flags(void)
/* set interrupt enabled status */
static inline void arch_local_irq_restore(unsigned long flags)
{
- asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags) : "memory");
+ asm volatile (" mvc .s2 %0,CSR\n" : : "b"(flags));
}
/* unconditionally enable interrupts */
diff --git a/trunk/arch/ia64/kernel/palinfo.c b/trunk/arch/ia64/kernel/palinfo.c
index 79521d5499f9..77597e5ea60a 100644
--- a/trunk/arch/ia64/kernel/palinfo.c
+++ b/trunk/arch/ia64/kernel/palinfo.c
@@ -849,6 +849,17 @@ static palinfo_entry_t palinfo_entries[]={
#define NR_PALINFO_ENTRIES (int) ARRAY_SIZE(palinfo_entries)
+/*
+ * this array is used to keep track of the proc entries we create. This is
+ * required in the module mode when we need to remove all entries. The procfs code
+ * does not do recursion of deletion
+ *
+ * Notes:
+ * - +1 accounts for the cpuN directory entry in /proc/pal
+ */
+#define NR_PALINFO_PROC_ENTRIES (NR_CPUS*(NR_PALINFO_ENTRIES+1))
+
+static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];
static struct proc_dir_entry *palinfo_dir;
/*
@@ -960,32 +971,60 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, voi
static void __cpuinit
create_palinfo_proc_entries(unsigned int cpu)
{
+# define CPUSTR "cpu%d"
+
pal_func_cpu_u_t f;
+ struct proc_dir_entry **pdir;
struct proc_dir_entry *cpu_dir;
int j;
- char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
- sprintf(cpustr, "cpu%d", cpu);
+ char cpustr[sizeof(CPUSTR)];
+
+
+ /*
+ * we keep track of created entries in a depth-first order for
+ * cleanup purposes. Each entry is stored into palinfo_proc_entries
+ */
+ sprintf(cpustr,CPUSTR, cpu);
cpu_dir = proc_mkdir(cpustr, palinfo_dir);
- if (!cpu_dir)
- return;
f.req_cpu = cpu;
+ /*
+ * Compute the location to store per cpu entries
+ * We dont store the top level entry in this list, but
+ * remove it finally after removing all cpu entries.
+ */
+ pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)];
+ *pdir++ = cpu_dir;
for (j=0; j < NR_PALINFO_ENTRIES; j++) {
f.func_id = j;
- create_proc_read_entry(
- palinfo_entries[j].name, 0, cpu_dir,
- palinfo_read_entry, (void *)f.value);
+ *pdir = create_proc_read_entry(
+ palinfo_entries[j].name, 0, cpu_dir,
+ palinfo_read_entry, (void *)f.value);
+ pdir++;
}
}
static void
remove_palinfo_proc_entries(unsigned int hcpu)
{
- char cpustr[3+4+1]; /* cpu numbers are up to 4095 on itanic */
- sprintf(cpustr, "cpu%d", hcpu);
- remove_proc_subtree(cpustr, palinfo_dir);
+ int j;
+ struct proc_dir_entry *cpu_dir, **pdir;
+
+ pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)];
+ cpu_dir = *pdir;
+ *pdir++=NULL;
+ for (j=0; j < (NR_PALINFO_ENTRIES); j++) {
+ if ((*pdir)) {
+ remove_proc_entry ((*pdir)->name, cpu_dir);
+ *pdir ++= NULL;
+ }
+ }
+
+ if (cpu_dir) {
+ remove_proc_entry(cpu_dir->name, palinfo_dir);
+ }
}
static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
@@ -1019,8 +1058,6 @@ palinfo_init(void)
printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);
palinfo_dir = proc_mkdir("pal", NULL);
- if (!palinfo_dir)
- return -ENOMEM;
/* Create palinfo dirs in /proc for all online cpus */
for_each_online_cpu(i) {
@@ -1036,8 +1073,22 @@ palinfo_init(void)
static void __exit
palinfo_exit(void)
{
+ int i = 0;
+
+ /* remove all nodes: depth first pass. Could optimize this */
+ for_each_online_cpu(i) {
+ remove_palinfo_proc_entries(i);
+ }
+
+ /*
+ * Remove the top level entry finally
+ */
+ remove_proc_entry(palinfo_dir->name, NULL);
+
+ /*
+ * Unregister from cpu notifier callbacks
+ */
unregister_hotcpu_notifier(&palinfo_cpu_notifier);
- remove_proc_subtree("pal", NULL);
}
module_init(palinfo_init);
diff --git a/trunk/arch/m68k/include/asm/gpio.h b/trunk/arch/m68k/include/asm/gpio.h
index 8cc83431805b..4395ffc51fdb 100644
--- a/trunk/arch/m68k/include/asm/gpio.h
+++ b/trunk/arch/m68k/include/asm/gpio.h
@@ -86,24 +86,4 @@ static inline int gpio_cansleep(unsigned gpio)
return gpio < MCFGPIO_PIN_MAX ? 0 : __gpio_cansleep(gpio);
}
-static inline int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
-{
- int err;
-
- err = gpio_request(gpio, label);
- if (err)
- return err;
-
- if (flags & GPIOF_DIR_IN)
- err = gpio_direction_input(gpio);
- else
- err = gpio_direction_output(gpio,
- (flags & GPIOF_INIT_HIGH) ? 1 : 0);
-
- if (err)
- gpio_free(gpio);
-
- return err;
-}
-
#endif
diff --git a/trunk/arch/metag/mm/Kconfig b/trunk/arch/metag/mm/Kconfig
index ccf2576786ee..975f2f4e3ecf 100644
--- a/trunk/arch/metag/mm/Kconfig
+++ b/trunk/arch/metag/mm/Kconfig
@@ -93,6 +93,11 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
+config MAX_ACTIVE_REGIONS
+ int
+ default "2" if SPARSEMEM
+ default "1"
+
config ARCH_POPULATES_NODE_MAP
def_bool y
diff --git a/trunk/arch/mips/Kconfig b/trunk/arch/mips/Kconfig
index 51244bf97271..cd2e21ff562a 100644
--- a/trunk/arch/mips/Kconfig
+++ b/trunk/arch/mips/Kconfig
@@ -18,7 +18,7 @@ config MIPS
select HAVE_KRETPROBES
select HAVE_DEBUG_KMEMLEAK
select ARCH_BINFMT_ELF_RANDOMIZE_PIE
- select HAVE_ARCH_TRANSPARENT_HUGEPAGE if CPU_SUPPORTS_HUGEPAGES && 64BIT
+ select HAVE_ARCH_TRANSPARENT_HUGEPAGE
select RTC_LIB if !MACH_LOONGSON
select GENERIC_ATOMIC64 if !64BIT
select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
@@ -657,7 +657,7 @@ config SNI_RM
bool "SNI RM200/300/400"
select FW_ARC if CPU_LITTLE_ENDIAN
select FW_ARC32 if CPU_LITTLE_ENDIAN
- select FW_SNIPROM if CPU_BIG_ENDIAN
+ select SNIPROM if CPU_BIG_ENDIAN
select ARCH_MAY_HAVE_PC_FDC
select BOOT_ELF32
select CEVT_R4K
@@ -1144,7 +1144,7 @@ config DEFAULT_SGI_PARTITION
config FW_ARC32
bool
-config FW_SNIPROM
+config SNIPROM
bool
config BOOT_ELF32
@@ -1493,6 +1493,7 @@ config CPU_XLP
select CPU_SUPPORTS_32BIT_KERNEL
select CPU_SUPPORTS_64BIT_KERNEL
select CPU_SUPPORTS_HIGHMEM
+ select CPU_HAS_LLSC
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
select CPU_HAS_PREFETCH
diff --git a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 9aa7d44898ed..ed1949c29508 100644
--- a/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/trunk/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -745,7 +745,10 @@ void __init board_prom_init(void)
strcpy(cfe_version, "unknown");
printk(KERN_INFO PFX "CFE version: %s\n", cfe_version);
- bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET);
+ if (bcm63xx_nvram_init(boot_addr + BCM963XX_NVRAM_OFFSET)) {
+ printk(KERN_ERR PFX "invalid nvram checksum\n");
+ return;
+ }
board_name = bcm63xx_nvram_get_name();
/* find board by name */
diff --git a/trunk/arch/mips/bcm63xx/nvram.c b/trunk/arch/mips/bcm63xx/nvram.c
index a4b8864f9307..620611680839 100644
--- a/trunk/arch/mips/bcm63xx/nvram.c
+++ b/trunk/arch/mips/bcm63xx/nvram.c
@@ -38,7 +38,7 @@ struct bcm963xx_nvram {
static struct bcm963xx_nvram nvram;
static int mac_addr_used;
-void __init bcm63xx_nvram_init(void *addr)
+int __init bcm63xx_nvram_init(void *addr)
{
unsigned int check_len;
u32 crc, expected_crc;
@@ -60,8 +60,9 @@ void __init bcm63xx_nvram_init(void *addr)
crc = crc32_le(~0, (u8 *)&nvram, check_len);
if (crc != expected_crc)
- pr_warn("nvram checksum failed, contents may be invalid (expected %08x, got %08x)\n",
- expected_crc, crc);
+ return -EINVAL;
+
+ return 0;
}
u8 *bcm63xx_nvram_get_name(void)
diff --git a/trunk/arch/mips/bcm63xx/setup.c b/trunk/arch/mips/bcm63xx/setup.c
index 35e18e98beb9..314231be788c 100644
--- a/trunk/arch/mips/bcm63xx/setup.c
+++ b/trunk/arch/mips/bcm63xx/setup.c
@@ -157,4 +157,4 @@ int __init bcm63xx_register_devices(void)
return board_register_devices();
}
-arch_initcall(bcm63xx_register_devices);
+device_initcall(bcm63xx_register_devices);
diff --git a/trunk/arch/mips/cavium-octeon/setup.c b/trunk/arch/mips/cavium-octeon/setup.c
index b0baa299f899..c594a3d4f743 100644
--- a/trunk/arch/mips/cavium-octeon/setup.c
+++ b/trunk/arch/mips/cavium-octeon/setup.c
@@ -174,10 +174,7 @@ static int octeon_kexec_prepare(struct kimage *image)
static void octeon_generic_shutdown(void)
{
- int i;
-#ifdef CONFIG_SMP
- int cpu;
-#endif
+ int cpu, i;
struct cvmx_bootmem_desc *bootmem_desc;
void *named_block_array_ptr;
diff --git a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
index 4e0b6bc1165e..62d6a3b4d3b7 100644
--- a/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
+++ b/trunk/arch/mips/include/asm/mach-bcm63xx/bcm63xx_nvram.h
@@ -9,8 +9,10 @@
*
* Initialized the local nvram copy from the target address and checks
* its checksum.
+ *
+ * Returns 0 on success.
*/
-void bcm63xx_nvram_init(void *nvram);
+int __init bcm63xx_nvram_init(void *nvram);
/**
* bcm63xx_nvram_get_name() - returns the board name according to nvram
diff --git a/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h b/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
index 193c0912d38e..d9c828419037 100644
--- a/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
+++ b/trunk/arch/mips/include/asm/mach-sead3/cpu-feature-overrides.h
@@ -28,7 +28,11 @@
/* #define cpu_has_prefetch ? */
#define cpu_has_mcheck 1
/* #define cpu_has_ejtag ? */
+#ifdef CONFIG_CPU_HAS_LLSC
#define cpu_has_llsc 1
+#else
+#define cpu_has_llsc 0
+#endif
/* #define cpu_has_vtag_icache ? */
/* #define cpu_has_dc_aliases ? */
/* #define cpu_has_ic_fills_f_dc ? */
diff --git a/trunk/arch/mips/include/asm/mipsregs.h b/trunk/arch/mips/include/asm/mipsregs.h
index 0da44d422f5b..12b70c25906a 100644
--- a/trunk/arch/mips/include/asm/mipsregs.h
+++ b/trunk/arch/mips/include/asm/mipsregs.h
@@ -1166,10 +1166,7 @@ do { \
unsigned int __dspctl; \
\
__asm__ __volatile__( \
- " .set push \n" \
- " .set dsp \n" \
" rddsp %0, %x1 \n" \
- " .set pop \n" \
: "=r" (__dspctl) \
: "i" (mask)); \
__dspctl; \
@@ -1178,198 +1175,30 @@ do { \
#define wrdsp(val, mask) \
do { \
__asm__ __volatile__( \
- " .set push \n" \
- " .set dsp \n" \
" wrdsp %0, %x1 \n" \
- " .set pop \n" \
: \
: "r" (val), "i" (mask)); \
} while (0)
-#define mflo0() \
-({ \
- long mflo0; \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mflo %0, $ac0 \n" \
- " .set pop \n" \
- : "=r" (mflo0)); \
- mflo0; \
-})
-
-#define mflo1() \
-({ \
- long mflo1; \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mflo %0, $ac1 \n" \
- " .set pop \n" \
- : "=r" (mflo1)); \
- mflo1; \
-})
-
-#define mflo2() \
-({ \
- long mflo2; \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mflo %0, $ac2 \n" \
- " .set pop \n" \
- : "=r" (mflo2)); \
- mflo2; \
-})
-
-#define mflo3() \
-({ \
- long mflo3; \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mflo %0, $ac3 \n" \
- " .set pop \n" \
- : "=r" (mflo3)); \
- mflo3; \
-})
-
-#define mfhi0() \
-({ \
- long mfhi0; \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mfhi %0, $ac0 \n" \
- " .set pop \n" \
- : "=r" (mfhi0)); \
- mfhi0; \
-})
-
-#define mfhi1() \
-({ \
- long mfhi1; \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mfhi %0, $ac1 \n" \
- " .set pop \n" \
- : "=r" (mfhi1)); \
- mfhi1; \
-})
-
-#define mfhi2() \
-({ \
- long mfhi2; \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mfhi %0, $ac2 \n" \
- " .set pop \n" \
- : "=r" (mfhi2)); \
- mfhi2; \
-})
-
-#define mfhi3() \
-({ \
- long mfhi3; \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mfhi %0, $ac3 \n" \
- " .set pop \n" \
- : "=r" (mfhi3)); \
- mfhi3; \
-})
-
-
-#define mtlo0(x) \
-({ \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mtlo %0, $ac0 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-})
-
-#define mtlo1(x) \
-({ \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mtlo %0, $ac1 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-})
-
-#define mtlo2(x) \
-({ \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mtlo %0, $ac2 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-})
-
-#define mtlo3(x) \
-({ \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mtlo %0, $ac3 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-})
-
-#define mthi0(x) \
-({ \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mthi %0, $ac0 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-})
-
-#define mthi1(x) \
-({ \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mthi %0, $ac1 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-})
-
-#define mthi2(x) \
-({ \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mthi %0, $ac2 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-})
-
-#define mthi3(x) \
-({ \
- __asm__( \
- " .set push \n" \
- " .set dsp \n" \
- " mthi %0, $ac3 \n" \
- " .set pop \n" \
- : \
- : "r" (x)); \
-})
+#define mflo0() ({ long mflo0; __asm__("mflo %0, $ac0" : "=r" (mflo0)); mflo0;})
+#define mflo1() ({ long mflo1; __asm__("mflo %0, $ac1" : "=r" (mflo1)); mflo1;})
+#define mflo2() ({ long mflo2; __asm__("mflo %0, $ac2" : "=r" (mflo2)); mflo2;})
+#define mflo3() ({ long mflo3; __asm__("mflo %0, $ac3" : "=r" (mflo3)); mflo3;})
+
+#define mfhi0() ({ long mfhi0; __asm__("mfhi %0, $ac0" : "=r" (mfhi0)); mfhi0;})
+#define mfhi1() ({ long mfhi1; __asm__("mfhi %0, $ac1" : "=r" (mfhi1)); mfhi1;})
+#define mfhi2() ({ long mfhi2; __asm__("mfhi %0, $ac2" : "=r" (mfhi2)); mfhi2;})
+#define mfhi3() ({ long mfhi3; __asm__("mfhi %0, $ac3" : "=r" (mfhi3)); mfhi3;})
+
+#define mtlo0(x) __asm__("mtlo %0, $ac0" ::"r" (x))
+#define mtlo1(x) __asm__("mtlo %0, $ac1" ::"r" (x))
+#define mtlo2(x) __asm__("mtlo %0, $ac2" ::"r" (x))
+#define mtlo3(x) __asm__("mtlo %0, $ac3" ::"r" (x))
+
+#define mthi0(x) __asm__("mthi %0, $ac0" ::"r" (x))
+#define mthi1(x) __asm__("mthi %0, $ac1" ::"r" (x))
+#define mthi2(x) __asm__("mthi %0, $ac2" ::"r" (x))
+#define mthi3(x) __asm__("mthi %0, $ac3" ::"r" (x))
#else
diff --git a/trunk/arch/mips/include/asm/signal.h b/trunk/arch/mips/include/asm/signal.h
index 8efe5a9e2c3e..197f6367c201 100644
--- a/trunk/arch/mips/include/asm/signal.h
+++ b/trunk/arch/mips/include/asm/signal.h
@@ -21,6 +21,6 @@
#include
#include
-#define __ARCH_HAS_IRIX_SIGACTION
+#define __ARCH_HAS_ODD_SIGACTION
#endif /* _ASM_SIGNAL_H */
diff --git a/trunk/arch/mips/include/uapi/asm/signal.h b/trunk/arch/mips/include/uapi/asm/signal.h
index addb9f556b71..d6b18b4d0f3a 100644
--- a/trunk/arch/mips/include/uapi/asm/signal.h
+++ b/trunk/arch/mips/include/uapi/asm/signal.h
@@ -72,12 +72,6 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
*
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
- *
- * SA_RESTORER used to be defined as 0x04000000 but only the O32 ABI ever
- * supported its use and no libc was using it, so the entire sa-restorer
- * functionality was removed with lmo commit 39bffc12c3580ab for 2.5.48
- * retaining only the SA_RESTORER definition as a reminder to avoid
- * accidental reuse of the mask bit.
*/
#define SA_ONSTACK 0x08000000
#define SA_RESETHAND 0x80000000
@@ -90,6 +84,8 @@ typedef unsigned long old_sigset_t; /* at least 32 bits */
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
+#define SA_RESTORER 0x04000000 /* Only for o32 */
+
#define MINSIGSTKSZ 2048
#define SIGSTKSZ 8192
diff --git a/trunk/arch/mips/kernel/Makefile b/trunk/arch/mips/kernel/Makefile
index de75fb50562b..f81d98f6184c 100644
--- a/trunk/arch/mips/kernel/Makefile
+++ b/trunk/arch/mips/kernel/Makefile
@@ -100,16 +100,29 @@ obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
obj-$(CONFIG_JUMP_LABEL) += jump_label.o
#
-# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is not
-# safe to unconditionnaly use the assembler -mdsp / -mdspr2 switches
-# here because the compiler may use DSP ASE instructions (such as lwx) in
-# code paths where we cannot check that the CPU we are running on supports it.
-# Proper abstraction using HAVE_AS_DSP and macros is done in
-# arch/mips/include/asm/mipsregs.h.
+# DSP ASE supported for MIPS32 or MIPS64 Release 2 cores only. It is safe
+# to enable DSP assembler support here even if the MIPS Release 2 CPU we
+# are targetting does not support DSP because all code-paths making use of
+# it properly check that the running CPU *actually does* support these
+# instructions.
#
ifeq ($(CONFIG_CPU_MIPSR2), y)
CFLAGS_DSP = -DHAVE_AS_DSP
+#
+# Check if assembler supports DSP ASE
+#
+ifeq ($(call cc-option-yn,-mdsp), y)
+CFLAGS_DSP += -mdsp
+endif
+
+#
+# Check if assembler supports DSP ASE Rev2
+#
+ifeq ($(call cc-option-yn,-mdspr2), y)
+CFLAGS_DSP += -mdspr2
+endif
+
CFLAGS_signal.o = $(CFLAGS_DSP)
CFLAGS_signal32.o = $(CFLAGS_DSP)
CFLAGS_process.o = $(CFLAGS_DSP)
diff --git a/trunk/arch/mips/kernel/cpu-probe.c b/trunk/arch/mips/kernel/cpu-probe.c
index 5fe66a0c3224..6bfccc227a95 100644
--- a/trunk/arch/mips/kernel/cpu-probe.c
+++ b/trunk/arch/mips/kernel/cpu-probe.c
@@ -580,9 +580,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
c->tlbsize = 48;
break;
case PRID_IMP_VR41XX:
- set_isa(c, MIPS_CPU_ISA_III);
- c->options = R4K_OPTS;
- c->tlbsize = 32;
switch (c->processor_id & 0xf0) {
case PRID_REV_VR4111:
c->cputype = CPU_VR4111;
@@ -607,7 +604,6 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "NEC VR4131";
} else {
c->cputype = CPU_VR4133;
- c->options |= MIPS_CPU_LLSC;
__cpu_name[cpu] = "NEC VR4133";
}
break;
@@ -617,6 +613,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
__cpu_name[cpu] = "NEC Vr41xx";
break;
}
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->options = R4K_OPTS;
+ c->tlbsize = 32;
break;
case PRID_IMP_R4300:
c->cputype = CPU_R4300;
@@ -1227,8 +1226,10 @@ __cpuinit void cpu_probe(void)
if (c->options & MIPS_CPU_FPU) {
c->fpu_id = cpu_get_fpu_id();
- if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+ if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
+ c->isa_level == MIPS_CPU_ISA_M32R2 ||
+ c->isa_level == MIPS_CPU_ISA_M64R1 ||
+ c->isa_level == MIPS_CPU_ISA_M64R2) {
if (c->fpu_id & MIPS_FPIR_3D)
c->ases |= MIPS_ASE_MIPS3D;
}
diff --git a/trunk/arch/mips/kernel/linux32.c b/trunk/arch/mips/kernel/linux32.c
index db9655f08892..8eeee1c860c0 100644
--- a/trunk/arch/mips/kernel/linux32.c
+++ b/trunk/arch/mips/kernel/linux32.c
@@ -171,7 +171,7 @@ SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
err = compat_sys_shmctl(first, second, compat_ptr(ptr));
break;
default:
- err = -ENOSYS;
+ err = -EINVAL;
break;
}
diff --git a/trunk/arch/mips/kernel/mcount.S b/trunk/arch/mips/kernel/mcount.S
index 33d067148e61..165867673357 100644
--- a/trunk/arch/mips/kernel/mcount.S
+++ b/trunk/arch/mips/kernel/mcount.S
@@ -46,9 +46,10 @@
PTR_L a5, PT_R9(sp)
PTR_L a6, PT_R10(sp)
PTR_L a7, PT_R11(sp)
-#endif
+#else
PTR_ADDIU sp, PT_SIZE
- .endm
+#endif
+.endm
.macro RETURN_BACK
jr ra
@@ -67,11 +68,7 @@ NESTED(ftrace_caller, PT_SIZE, ra)
.globl _mcount
_mcount:
b ftrace_stub
-#ifdef CONFIG_32BIT
- addiu sp,sp,8
-#else
- nop
-#endif
+ addiu sp,sp,8
/* When tracing is activated, it calls ftrace_caller+8 (aka here) */
lw t1, function_trace_stop
diff --git a/trunk/arch/mips/kernel/proc.c b/trunk/arch/mips/kernel/proc.c
index 7a54f74b7818..135c4aadccbe 100644
--- a/trunk/arch/mips/kernel/proc.c
+++ b/trunk/arch/mips/kernel/proc.c
@@ -67,7 +67,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_mips_r) {
seq_printf(m, "isa\t\t\t:");
if (cpu_has_mips_1)
- seq_printf(m, "%s", " mips1");
+ seq_printf(m, "%s", "mips1");
if (cpu_has_mips_2)
seq_printf(m, "%s", " mips2");
if (cpu_has_mips_3)
diff --git a/trunk/arch/mips/kernel/traps.c b/trunk/arch/mips/kernel/traps.c
index c3abb88170fc..a200b5bdbb87 100644
--- a/trunk/arch/mips/kernel/traps.c
+++ b/trunk/arch/mips/kernel/traps.c
@@ -1571,7 +1571,7 @@ void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
#ifdef CONFIG_64BIT
status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
- if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
+ if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
status_set |= ST0_XX;
if (cpu_has_dsp)
status_set |= ST0_MX;
diff --git a/trunk/arch/mips/lib/bitops.c b/trunk/arch/mips/lib/bitops.c
index a64daee740ee..81f1dcfdcab8 100644
--- a/trunk/arch/mips/lib/bitops.c
+++ b/trunk/arch/mips/lib/bitops.c
@@ -90,12 +90,12 @@ int __mips_test_and_set_bit(unsigned long nr,
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- int res;
+ unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a) != 0;
+ res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
return res;
@@ -116,12 +116,12 @@ int __mips_test_and_set_bit_lock(unsigned long nr,
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- int res;
+ unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a) != 0;
+ res = (mask & *a);
*a |= mask;
raw_local_irq_restore(flags);
return res;
@@ -141,12 +141,12 @@ int __mips_test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- int res;
+ unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a) != 0;
+ res = (mask & *a);
*a &= ~mask;
raw_local_irq_restore(flags);
return res;
@@ -166,12 +166,12 @@ int __mips_test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
unsigned bit = nr & SZLONG_MASK;
unsigned long mask;
unsigned long flags;
- int res;
+ unsigned long res;
a += nr >> SZLONG_LOG;
mask = 1UL << bit;
raw_local_irq_save(flags);
- res = (mask & *a) != 0;
+ res = (mask & *a);
*a ^= mask;
raw_local_irq_restore(flags);
return res;
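The bitops.c hunks above change the interrupt-protected fallback test-and-set helpers to return the raw masked word instead of a normalized 0/1 value. The standalone sketch below shows the general shape of such a helper; the names, the lack of real interrupt masking, and the toy main() are illustrative assumptions, not the kernel implementation:

/* Sketch: a non-atomic test-and-set bit; the kernel brackets the critical
 * section with raw_local_irq_save()/raw_local_irq_restore(). */
#include <limits.h>

#define BITS_PER_LONG_SKETCH (sizeof(unsigned long) * CHAR_BIT)

static int test_and_set_bit_sketch(unsigned long nr, volatile unsigned long *addr)
{
	volatile unsigned long *p = addr + nr / BITS_PER_LONG_SKETCH;
	unsigned long mask = 1UL << (nr % BITS_PER_LONG_SKETCH);
	unsigned long old;

	old = *p;			/* read the word containing the bit */
	*p = old | mask;		/* set the bit */
	return (old & mask) != 0;	/* "!= 0" normalizes the result to 0 or 1 */
}

int main(void)
{
	unsigned long words[2] = { 0, 0 };
	int first  = test_and_set_bit_sketch(5, words);	/* 0: bit was clear */
	int second = test_and_set_bit_sketch(5, words);	/* 1: bit already set */

	return (first == 0 && second == 1) ? 0 : 1;
}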
diff --git a/trunk/arch/mips/lib/csum_partial.S b/trunk/arch/mips/lib/csum_partial.S
index a6adffbb4e5f..507147aebd41 100644
--- a/trunk/arch/mips/lib/csum_partial.S
+++ b/trunk/arch/mips/lib/csum_partial.S
@@ -270,7 +270,7 @@ LEAF(csum_partial)
#endif
/* odd buffer alignment? */
-#ifdef CONFIG_CPU_MIPSR2
+#ifdef CPU_MIPSR2
wsbh v1, sum
movn sum, v1, t7
#else
@@ -670,7 +670,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
addu sum, v1
#endif
-#ifdef CONFIG_CPU_MIPSR2
+#ifdef CPU_MIPSR2
wsbh v1, sum
movn sum, v1, odd
#else
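On MIPS R2 the assembly above uses wsbh to swap the bytes within each halfword of the running checksum when the source buffer began at an odd address. A rough user-space C equivalent of that fix-up, assuming a 32-bit folded sum, might look like this (sketch only):

/* Sketch: swap bytes within each 16-bit halfword of the sum, as wsbh does. */
#include <stdint.h>
#include <stdio.h>

static uint32_t csum_fixup_odd_sketch(uint32_t sum, int src_was_odd)
{
	if (src_was_odd)
		sum = ((sum & 0x00ff00ffu) << 8) | ((sum >> 8) & 0x00ff00ffu);
	return sum;
}

int main(void)
{
	printf("0x%08x\n", csum_fixup_odd_sketch(0x11223344u, 1));	/* 0x22114433 */
	return 0;
}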
diff --git a/trunk/arch/mips/mm/c-r4k.c b/trunk/arch/mips/mm/c-r4k.c
index 2078915eacb9..ecca559b8d7b 100644
--- a/trunk/arch/mips/mm/c-r4k.c
+++ b/trunk/arch/mips/mm/c-r4k.c
@@ -1247,8 +1247,10 @@ static void __cpuinit setup_scache(void)
return;
default:
- if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
+ if (c->isa_level == MIPS_CPU_ISA_M32R1 ||
+ c->isa_level == MIPS_CPU_ISA_M32R2 ||
+ c->isa_level == MIPS_CPU_ISA_M64R1 ||
+ c->isa_level == MIPS_CPU_ISA_M64R2) {
#ifdef CONFIG_MIPS_CPU_SCACHE
if (mips_sc_init ()) {
scache_size = c->scache.ways * c->scache.sets * c->scache.linesz;
diff --git a/trunk/arch/mips/mm/sc-mips.c b/trunk/arch/mips/mm/sc-mips.c
index df96da7e939b..93d937b4b1ba 100644
--- a/trunk/arch/mips/mm/sc-mips.c
+++ b/trunk/arch/mips/mm/sc-mips.c
@@ -98,8 +98,10 @@ static inline int __init mips_sc_probe(void)
c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
/* Ignore anything but MIPSxx processors */
- if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
- MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)))
+ if (c->isa_level != MIPS_CPU_ISA_M32R1 &&
+ c->isa_level != MIPS_CPU_ISA_M32R2 &&
+ c->isa_level != MIPS_CPU_ISA_M64R1 &&
+ c->isa_level != MIPS_CPU_ISA_M64R2)
return 0;
/* Does this MIPS32/MIPS64 CPU have a config2 register? */
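This hunk, together with the matching ones in cpu-probe.c and c-r4k.c, flips between treating isa_level as a bitmask and as a single enumerated value. The toy program below, using invented flag values, illustrates why the two tests are not interchangeable once a CPU advertises more than one level:

/* Sketch with invented values: a mask test matches any set bit, an equality
 * test only matches the exact combination. */
#include <stdio.h>

#define ISA_M32R1_SKETCH 0x01
#define ISA_M32R2_SKETCH 0x02

int main(void)
{
	/* hypothetical CPU that sets both the R1 and R2 bits */
	unsigned int isa_level = ISA_M32R1_SKETCH | ISA_M32R2_SKETCH;

	if (isa_level & (ISA_M32R1_SKETCH | ISA_M32R2_SKETCH))
		printf("bitmask test matches\n");

	if (isa_level == ISA_M32R2_SKETCH)
		printf("equality test matches\n");	/* never printed here */

	return 0;
}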
diff --git a/trunk/arch/mips/pci/pci-alchemy.c b/trunk/arch/mips/pci/pci-alchemy.c
index d1faece21b6a..38a80c83fd67 100644
--- a/trunk/arch/mips/pci/pci-alchemy.c
+++ b/trunk/arch/mips/pci/pci-alchemy.c
@@ -19,7 +19,7 @@
#include
#include
-#ifdef CONFIG_PCI_DEBUG
+#ifdef CONFIG_DEBUG_PCI
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do {} while (0)
@@ -162,7 +162,7 @@ static int config_access(unsigned char access_type, struct pci_bus *bus,
if (status & (1 << 29)) {
*data = 0xffffffff;
error = -1;
- DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d\n",
+ DBG("alchemy-pci: master abort on cfg access %d bus %d dev %d",
access_type, bus->number, device);
} else if ((status >> 28) & 0xf) {
DBG("alchemy-pci: PCI ERR detected: dev %d, status %lx\n",
diff --git a/trunk/arch/powerpc/Kconfig b/trunk/arch/powerpc/Kconfig
index a9ae67317374..ea5bb045983a 100644
--- a/trunk/arch/powerpc/Kconfig
+++ b/trunk/arch/powerpc/Kconfig
@@ -428,6 +428,11 @@ config NODES_SHIFT
default "4"
depends on NEED_MULTIPLE_NODES
+config MAX_ACTIVE_REGIONS
+ int
+ default "256" if PPC64
+ default "32"
+
config ARCH_SELECT_MEMORY_MODEL
def_bool y
depends on PPC64
@@ -642,14 +647,14 @@ menu "Bus options"
config ISA
bool "Support for ISA-bus hardware"
- depends on PPC_CHRP
+ depends on PPC_PREP || PPC_CHRP
select PPC_I8259
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. If you have an Apple machine, say N here; if you
- have an IBM RS/6000 or pSeries machine, say Y. If you have an
- embedded board, consult your board documentation.
+ have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
+ you have an embedded board, consult your board documentation.
config ZONE_DMA
bool
@@ -740,6 +745,7 @@ config PCI
bool "PCI support" if PPC_PCI_CHOICE
default y if !40x && !CPM2 && !8xx && !PPC_83xx \
&& !PPC_85xx && !PPC_86xx && !GAMECUBE_COMMON
+ default PCI_PERMEDIA if !4xx && !CPM2 && !8xx
default PCI_QSPAN if !4xx && !CPM2 && 8xx
select ARCH_SUPPORTS_MSI
select GENERIC_PCI_IOMAP
@@ -963,7 +969,7 @@ config TASK_SIZE_BOOL
config TASK_SIZE
hex "Size of user task space" if TASK_SIZE_BOOL
- default "0x80000000" if PPC_8xx
+ default "0x80000000" if PPC_PREP || PPC_8xx
default "0xc0000000"
config CONSISTENT_SIZE_BOOL
diff --git a/trunk/arch/powerpc/configs/ps3_defconfig b/trunk/arch/powerpc/configs/ps3_defconfig
index f79196232917..7a5c15fcc7cf 100644
--- a/trunk/arch/powerpc/configs/ps3_defconfig
+++ b/trunk/arch/powerpc/configs/ps3_defconfig
@@ -3,11 +3,11 @@ CONFIG_TUNE_CELL=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
+CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_LZMA=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EMBEDDED=y
# CONFIG_PERF_EVENTS is not set
diff --git a/trunk/arch/powerpc/include/asm/bitops.h b/trunk/arch/powerpc/include/asm/bitops.h
index 910194e9a1e2..08bd299c75b1 100644
--- a/trunk/arch/powerpc/include/asm/bitops.h
+++ b/trunk/arch/powerpc/include/asm/bitops.h
@@ -53,7 +53,7 @@
#define smp_mb__after_clear_bit() smp_mb()
/* Macro for generating the ***_bits() functions */
-#define DEFINE_BITOP(fn, op, prefix) \
+#define DEFINE_BITOP(fn, op, prefix, postfix) \
static __inline__ void fn(unsigned long mask, \
volatile unsigned long *_p) \
{ \
@@ -66,15 +66,16 @@ static __inline__ void fn(unsigned long mask, \
PPC405_ERR77(0,%3) \
PPC_STLCX "%0,0,%3\n" \
"bne- 1b\n" \
+ postfix \
: "=&r" (old), "+m" (*p) \
: "r" (mask), "r" (p) \
: "cc", "memory"); \
}
-DEFINE_BITOP(set_bits, or, "")
-DEFINE_BITOP(clear_bits, andc, "")
-DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER)
-DEFINE_BITOP(change_bits, xor, "")
+DEFINE_BITOP(set_bits, or, "", "")
+DEFINE_BITOP(clear_bits, andc, "", "")
+DEFINE_BITOP(clear_bits_unlock, andc, PPC_RELEASE_BARRIER, "")
+DEFINE_BITOP(change_bits, xor, "", "")
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
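DEFINE_BITOP() stamps out lwarx/stwcx. read-modify-write loops, and the extra postfix argument lets one variant append a barrier after the loop. The sketch below re-expresses the shape of one generated function with a generic compare-and-swap builtin instead of PowerPC assembly; it is an illustration, not the real implementation:

/* Sketch: the shape of a DEFINE_BITOP()-generated function, using a CAS loop
 * as a stand-in for the lwarx/stwcx. retry loop. */
static void set_bits_sketch(unsigned long mask, volatile unsigned long *p)
{
	unsigned long old = *p;

	/* retry until the update lands, mirroring the stwcx. "bne- 1b" loop */
	while (!__atomic_compare_exchange_n((unsigned long *)p, &old, old | mask,
					    0, __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;
	/* a non-empty "postfix" argument would emit a barrier right here */
}

int main(void)
{
	unsigned long word = 0;

	set_bits_sketch(0x5UL, &word);
	return word == 0x5UL ? 0 : 1;
}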
diff --git a/trunk/arch/powerpc/include/asm/cputable.h b/trunk/arch/powerpc/include/asm/cputable.h
index ccadad6db4e4..fb3245e928ea 100644
--- a/trunk/arch/powerpc/include/asm/cputable.h
+++ b/trunk/arch/powerpc/include/asm/cputable.h
@@ -52,7 +52,6 @@ struct cpu_spec {
char *cpu_name;
unsigned long cpu_features; /* Kernel features */
unsigned int cpu_user_features; /* Userland features */
- unsigned int cpu_user_features2; /* Userland features v2 */
unsigned int mmu_features; /* MMU features */
/* cache line sizes */
diff --git a/trunk/arch/powerpc/include/asm/dma.h b/trunk/arch/powerpc/include/asm/dma.h
index a5c6d83b5f60..f6813e919bb2 100644
--- a/trunk/arch/powerpc/include/asm/dma.h
+++ b/trunk/arch/powerpc/include/asm/dma.h
@@ -16,6 +16,10 @@
*
* None of this really applies for Power Macintoshes. There is
* basically just enough here to get kernel/dma.c to compile.
+ *
+ * There may be some comments or restrictions made here which are
+ * not valid for the PReP platform. Take what you read
+ * with a grain of salt.
*/
#include
@@ -53,6 +57,7 @@
* - page registers for 5-7 don't use data bit 0, represent 128K pages
* - page registers for 0-3 use bit 0, represent 64K pages
*
+ * On PReP, DMA transfers are limited to the lower 16MB of _physical_ memory.
* On CHRP, the W83C553F (and VLSI Tollgate?) support full 32 bit addressing.
* Note that addresses loaded into registers must be _physical_ addresses,
* not logical addresses (which may differ if paging is active).
diff --git a/trunk/arch/powerpc/include/asm/elf.h b/trunk/arch/powerpc/include/asm/elf.h
index cc0655a702a7..ac9790fc3836 100644
--- a/trunk/arch/powerpc/include/asm/elf.h
+++ b/trunk/arch/powerpc/include/asm/elf.h
@@ -61,7 +61,6 @@ typedef elf_vrregset_t elf_fpxregset_t;
instruction set this cpu supports. This could be done in userspace,
but it's not easy, and we've already done it here. */
# define ELF_HWCAP (cur_cpu_spec->cpu_user_features)
-# define ELF_HWCAP2 (cur_cpu_spec->cpu_user_features2)
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
diff --git a/trunk/arch/powerpc/include/asm/exception-64s.h b/trunk/arch/powerpc/include/asm/exception-64s.h
index 8e5fae8beaf6..05e6d2ee1db9 100644
--- a/trunk/arch/powerpc/include/asm/exception-64s.h
+++ b/trunk/arch/powerpc/include/asm/exception-64s.h
@@ -414,6 +414,7 @@ label##_relon_hv: \
#define SOFTEN_NOTEST_HV(vec) _SOFTEN_TEST(EXC_HV, vec)
#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
+ HMT_MEDIUM_PPR_DISCARD; \
SET_SCRATCH0(r13); /* save r13 */ \
EXCEPTION_PROLOG_0(PACA_EXGEN); \
__EXCEPTION_PROLOG_1(PACA_EXGEN, extra, vec); \
@@ -426,7 +427,6 @@ label##_relon_hv: \
. = loc; \
.globl label##_pSeries; \
label##_pSeries: \
- HMT_MEDIUM_PPR_DISCARD; \
_MASKABLE_EXCEPTION_PSERIES(vec, label, \
EXC_STD, SOFTEN_TEST_PR)
diff --git a/trunk/arch/powerpc/include/asm/firmware.h b/trunk/arch/powerpc/include/asm/firmware.h
index 0df54646f968..097dee57a7a9 100644
--- a/trunk/arch/powerpc/include/asm/firmware.h
+++ b/trunk/arch/powerpc/include/asm/firmware.h
@@ -18,6 +18,7 @@
#include
/* firmware feature bitmask values */
+#define FIRMWARE_MAX_FEATURES 63
#define FW_FEATURE_PFT ASM_CONST(0x0000000000000001)
#define FW_FEATURE_TCE ASM_CONST(0x0000000000000002)
@@ -50,8 +51,6 @@
#define FW_FEATURE_OPALv2 ASM_CONST(0x0000000020000000)
#define FW_FEATURE_SET_MODE ASM_CONST(0x0000000040000000)
#define FW_FEATURE_BEST_ENERGY ASM_CONST(0x0000000080000000)
-#define FW_FEATURE_TYPE1_AFFINITY ASM_CONST(0x0000000100000000)
-#define FW_FEATURE_PRRN ASM_CONST(0x0000000200000000)
#ifndef __ASSEMBLY__
@@ -66,8 +65,7 @@ enum {
FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR |
FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO |
- FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY |
- FW_FEATURE_TYPE1_AFFINITY | FW_FEATURE_PRRN,
+ FW_FEATURE_SET_MODE | FW_FEATURE_BEST_ENERGY,
FW_FEATURE_PSERIES_ALWAYS = 0,
FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
FW_FEATURE_POWERNV_ALWAYS = 0,
diff --git a/trunk/arch/powerpc/include/asm/hardirq.h b/trunk/arch/powerpc/include/asm/hardirq.h
index 3bdcfce2c42a..3147a2970125 100644
--- a/trunk/arch/powerpc/include/asm/hardirq.h
+++ b/trunk/arch/powerpc/include/asm/hardirq.h
@@ -10,9 +10,6 @@ typedef struct {
unsigned int pmu_irqs;
unsigned int mce_exceptions;
unsigned int spurious_irqs;
-#ifdef CONFIG_PPC_DOORBELL
- unsigned int doorbell_irqs;
-#endif
} ____cacheline_aligned irq_cpustat_t;
DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
diff --git a/trunk/arch/powerpc/include/asm/io.h b/trunk/arch/powerpc/include/asm/io.h
index dd15e5e37d6d..f94ef4213e9d 100644
--- a/trunk/arch/powerpc/include/asm/io.h
+++ b/trunk/arch/powerpc/include/asm/io.h
@@ -15,6 +15,10 @@
extern int check_legacy_ioport(unsigned long base_port);
#define I8042_DATA_REG 0x60
#define FDC_BASE 0x3f0
+/* only relevant for PReP */
+#define _PIDXR 0x279
+#define _PNPWRP 0xa79
+#define PNPBIOS_BASE 0xf000
#if defined(CONFIG_PPC64) && defined(CONFIG_PCI)
extern struct pci_dev *isa_bridge_pcidev;
diff --git a/trunk/arch/powerpc/include/asm/opal.h b/trunk/arch/powerpc/include/asm/opal.h
index b6c8b58b1d76..a4b28f165b6c 100644
--- a/trunk/arch/powerpc/include/asm/opal.h
+++ b/trunk/arch/powerpc/include/asm/opal.h
@@ -117,7 +117,6 @@ extern int opal_enter_rtas(struct rtas_args *args,
#define OPAL_SET_SLOT_LED_STATUS 55
#define OPAL_GET_EPOW_STATUS 56
#define OPAL_SET_SYSTEM_ATTENTION_LED 57
-#define OPAL_PCI_MSI_EOI 63
#ifndef __ASSEMBLY__
@@ -507,7 +506,6 @@ int64_t opal_pci_get_xive_reissue(uint64_t phb_id, uint32_t xive_number,
uint8_t *p_bit, uint8_t *q_bit);
int64_t opal_pci_set_xive_reissue(uint64_t phb_id, uint32_t xive_number,
uint8_t p_bit, uint8_t q_bit);
-int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
uint32_t xive_num);
int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
diff --git a/trunk/arch/powerpc/include/asm/page_64.h b/trunk/arch/powerpc/include/asm/page_64.h
index 88693cef4f3d..cd915d6b093d 100644
--- a/trunk/arch/powerpc/include/asm/page_64.h
+++ b/trunk/arch/powerpc/include/asm/page_64.h
@@ -99,7 +99,8 @@ extern unsigned long slice_get_unmapped_area(unsigned long addr,
unsigned long len,
unsigned long flags,
unsigned int psize,
- int topdown);
+ int topdown,
+ int use_cache);
extern unsigned int get_slice_psize(struct mm_struct *mm,
unsigned long addr);
diff --git a/trunk/arch/powerpc/include/asm/parport.h b/trunk/arch/powerpc/include/asm/parport.h
index a452968b29ea..6dc2577932b1 100644
--- a/trunk/arch/powerpc/include/asm/parport.h
+++ b/trunk/arch/powerpc/include/asm/parport.h
@@ -21,7 +21,9 @@ static int parport_pc_find_nonpci_ports (int autoirq, int autodma)
int count = 0;
int virq;
- for_each_compatible_node(np, "parallel", "pnpPNP,400") {
+ for (np = NULL; (np = of_find_compatible_node(np,
+ "parallel",
+ "pnpPNP,400")) != NULL;) {
prop = of_get_property(np, "reg", &propsize);
if (!prop || propsize > 6*sizeof(u32))
continue;
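The hunk replaces the for_each_compatible_node() helper with the open-coded of_find_compatible_node() loop it roughly expands to. A simplified sketch of such a wrapper macro (kernel-context pseudocode, refcounting details omitted) is:

/* Sketch only: how a for_each-style wrapper reduces to the explicit loop
 * shown in the hunk above. Not standalone code. */
#define for_each_compatible_node_sketch(np, type, compat)		\
	for (np = of_find_compatible_node(NULL, type, compat);		\
	     np != NULL;						\
	     np = of_find_compatible_node(np, type, compat))

/* usage, equivalent to the loop the hunk open-codes:
 *	struct device_node *np;
 *	for_each_compatible_node_sketch(np, "parallel", "pnpPNP,400") {
 *		... inspect np ...
 *	}
 */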
diff --git a/trunk/arch/powerpc/include/asm/perf_event_server.h b/trunk/arch/powerpc/include/asm/perf_event_server.h
index f265049dd7d6..d0aec72722e9 100644
--- a/trunk/arch/powerpc/include/asm/perf_event_server.h
+++ b/trunk/arch/powerpc/include/asm/perf_event_server.h
@@ -33,8 +33,6 @@ struct power_pmu {
unsigned long *valp);
int (*get_alternatives)(u64 event_id, unsigned int flags,
u64 alt[]);
- u64 (*bhrb_filter_map)(u64 branch_sample_type);
- void (*config_bhrb)(u64 pmu_bhrb_filter);
void (*disable_pmc)(unsigned int pmc, unsigned long mmcr[]);
int (*limited_pmc_event)(u64 event_id);
u32 flags;
@@ -44,9 +42,6 @@ struct power_pmu {
int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
[PERF_COUNT_HW_CACHE_RESULT_MAX];
-
- /* BHRB entries in the PMU */
- int bhrb_nr;
};
/*
@@ -57,9 +52,6 @@ struct power_pmu {
#define PPMU_NO_SIPR 0x00000004 /* no SIPR/HV in MMCRA at all */
#define PPMU_NO_CONT_SAMPLING 0x00000008 /* no continuous sampling */
#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
-#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
-#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
-#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */
/*
* Values for flags to get_alternatives()
@@ -73,7 +65,6 @@ extern int register_power_pmu(struct power_pmu *);
struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
-extern unsigned long int read_bhrb(int n);
/*
* Only override the default definitions in include/linux/perf_event.h
diff --git a/trunk/arch/powerpc/include/asm/ppc-opcode.h b/trunk/arch/powerpc/include/asm/ppc-opcode.h
index 0c34e4803499..8752bc8e34a3 100644
--- a/trunk/arch/powerpc/include/asm/ppc-opcode.h
+++ b/trunk/arch/powerpc/include/asm/ppc-opcode.h
@@ -82,8 +82,6 @@
#define __REGA0_R31 31
/* sorted alphabetically */
-#define PPC_INST_BHRBE 0x7c00025c
-#define PPC_INST_CLRBHRB 0x7c00035c
#define PPC_INST_DCBA 0x7c0005ec
#define PPC_INST_DCBA_MASK 0xfc0007fe
#define PPC_INST_DCBAL 0x7c2005ec
@@ -299,12 +297,6 @@
#define PPC_NAP stringify_in_c(.long PPC_INST_NAP)
#define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP)
-/* BHRB instructions */
-#define PPC_CLRBHRB stringify_in_c(.long PPC_INST_CLRBHRB)
-#define PPC_MFBHRBE(r, n) stringify_in_c(.long PPC_INST_BHRBE | \
- __PPC_RT(r) | \
- (((n) & 0x3ff) << 11))
-
/* Transactional memory instructions */
#define TRECHKPT stringify_in_c(.long PPC_INST_TRECHKPT)
#define TRECLAIM(r) stringify_in_c(.long PPC_INST_TRECLAIM \
diff --git a/trunk/arch/powerpc/include/asm/processor.h b/trunk/arch/powerpc/include/asm/processor.h
index 0a4cc5d649e1..7ff9eaa3ea6c 100644
--- a/trunk/arch/powerpc/include/asm/processor.h
+++ b/trunk/arch/powerpc/include/asm/processor.h
@@ -40,7 +40,7 @@
* -- BenH.
*/
-/* PREP sub-platform types. Unused */
+/* PREP sub-platform types see residual.h for these */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
@@ -56,6 +56,13 @@
extern int _chrp_type;
+#ifdef CONFIG_PPC_PREP
+
+/* what kind of prep workstation we are */
+extern int _prep_type;
+
+#endif /* CONFIG_PPC_PREP */
+
#endif /* defined(__KERNEL__) && defined(CONFIG_PPC32) */
/*
diff --git a/trunk/arch/powerpc/include/asm/prom.h b/trunk/arch/powerpc/include/asm/prom.h
index bc2da154f68b..99c92d5363e4 100644
--- a/trunk/arch/powerpc/include/asm/prom.h
+++ b/trunk/arch/powerpc/include/asm/prom.h
@@ -74,75 +74,6 @@ struct of_drconf_cell {
#define DRCONF_MEM_AI_INVALID 0x00000040
#define DRCONF_MEM_RESERVED 0x00000080
-/*
- * There are two methods for telling firmware what our capabilities are.
- * Newer machines have an "ibm,client-architecture-support" method on the
- * root node. For older machines, we have to call the "process-elf-header"
- * method in the /packages/elf-loader node, passing it a fake 32-bit
- * ELF header containing a couple of PT_NOTE sections that contain
- * structures that contain various information.
- */
-
-/* New method - extensible architecture description vector. */
-
-/* Option vector bits - generic bits in byte 1 */
-#define OV_IGNORE 0x80 /* ignore this vector */
-#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present*/
-
-/* Option vector 1: processor architectures supported */
-#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */
-#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */
-#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */
-#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
-#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
-#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
-#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
-#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
-
-/* Option vector 2: Open Firmware options supported */
-#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
-
-/* Option vector 3: processor options supported */
-#define OV3_FP 0x80 /* floating point */
-#define OV3_VMX 0x40 /* VMX/Altivec */
-#define OV3_DFP 0x20 /* decimal FP */
-
-/* Option vector 4: IBM PAPR implementation */
-#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
-
-/* Option vector 5: PAPR/OF options supported
- * These bits are also used in firmware_has_feature() to validate
- * the capabilities reported for vector 5 in the device tree so we
- * encode the vector index in the define and use the OV5_FEAT()
- * and OV5_INDX() macros to extract the desired information.
- */
-#define OV5_FEAT(x) ((x) & 0xff)
-#define OV5_INDX(x) ((x) >> 8)
-#define OV5_LPAR 0x0280 /* logical partitioning supported */
-#define OV5_SPLPAR 0x0240 /* shared-processor LPAR supported */
-/* ibm,dynamic-reconfiguration-memory property supported */
-#define OV5_DRCONF_MEMORY 0x0220
-#define OV5_LARGE_PAGES 0x0210 /* large pages supported */
-#define OV5_DONATE_DEDICATE_CPU 0x0202 /* donate dedicated CPU support */
-#define OV5_MSI 0x0201 /* PCIe/MSI support */
-#define OV5_CMO 0x0480 /* Cooperative Memory Overcommitment */
-#define OV5_XCMO 0x0440 /* Page Coalescing */
-#define OV5_TYPE1_AFFINITY 0x0580 /* Type 1 NUMA affinity */
-#define OV5_PRRN 0x0540 /* Platform Resource Reassignment */
-#define OV5_PFO_HW_RNG 0x0E80 /* PFO Random Number Generator */
-#define OV5_PFO_HW_842 0x0E40 /* PFO Compression Accelerator */
-#define OV5_PFO_HW_ENCR 0x0E20 /* PFO Encryption Accelerator */
-#define OV5_SUB_PROCESSORS 0x0F01 /* 1,2,or 4 Sub-Processors supported */
-
-/* Option Vector 6: IBM PAPR hints */
-#define OV6_LINUX 0x02 /* Linux is our OS */
-
-/*
- * The architecture vector has an array of PVR mask/value pairs,
- * followed by # option vectors - 1, followed by the option vectors.
- */
-extern unsigned char ibm_architecture_vec[];
-
/* These includes are put at the bottom because they may contain things
* that are overridden by this file. Ideally they shouldn't be included
* by this file, but there are a bunch of .c files that currently depend
diff --git a/trunk/arch/powerpc/include/asm/ptrace.h b/trunk/arch/powerpc/include/asm/ptrace.h
index becc08e6a65c..5f995681bc1d 100644
--- a/trunk/arch/powerpc/include/asm/ptrace.h
+++ b/trunk/arch/powerpc/include/asm/ptrace.h
@@ -92,8 +92,7 @@ static inline long regs_return_value(struct pt_regs *regs)
} while(0)
struct task_struct;
-extern int ptrace_get_reg(struct task_struct *task, int regno,
- unsigned long *data);
+extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
extern int ptrace_put_reg(struct task_struct *task, int regno,
unsigned long data);
diff --git a/trunk/arch/powerpc/include/asm/reg.h b/trunk/arch/powerpc/include/asm/reg.h
index 5c6fbe2c5ce6..c9c67fc888c9 100644
--- a/trunk/arch/powerpc/include/asm/reg.h
+++ b/trunk/arch/powerpc/include/asm/reg.h
@@ -268,13 +268,6 @@
#define SPRN_FSCR 0x099 /* Facility Status & Control Register */
#define FSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
#define FSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
-#define SPRN_HFSCR 0xbe /* HV=1 Facility Status & Control Register */
-#define HFSCR_TAR (1 << (63-55)) /* Enable Target Address Register */
-#define HFSCR_TM (1 << (63-58)) /* Enable Transactional Memory */
-#define HFSCR_PM (1 << (63-60)) /* Enable prob/priv access to PMU SPRs */
-#define HFSCR_DSCR (1 << (63-61)) /* Enable Data Stream Control Register */
-#define HFSCR_VECVSX (1 << (63-62)) /* Enable VMX/VSX */
-#define HFSCR_FP (1 << (63-63)) /* Enable Floating Point */
#define SPRN_TAR 0x32f /* Target Address Register */
#define SPRN_LPCR 0x13E /* LPAR Control Register */
#define LPCR_VPM0 (1ul << (63-0))
@@ -638,7 +631,6 @@
#define MMCR0_FCWAIT 0x00000002UL /* freeze counter in WAIT state */
#define MMCR0_FCHV 0x00000001UL /* freeze conditions in hypervisor mode */
#define SPRN_MMCR1 798
-#define SPRN_MMCR2 769
#define SPRN_MMCRA 0x312
#define MMCRA_SDSYNC 0x80000000UL /* SDAR synced with SIAR */
#define MMCRA_SDAR_DCACHE_MISS 0x40000000UL
@@ -657,10 +649,6 @@
#define POWER7P_MMCRA_SIAR_VALID 0x10000000 /* P7+ SIAR contents valid */
#define POWER7P_MMCRA_SDAR_VALID 0x08000000 /* P7+ SDAR contents valid */
-#define SPRN_MMCRH 316 /* Hypervisor monitor mode control register */
-#define SPRN_MMCRS 894 /* Supervisor monitor mode control register */
-#define SPRN_MMCRC 851 /* Core monitor mode control register */
-
#define SPRN_PMC1 787
#define SPRN_PMC2 788
#define SPRN_PMC3 789
@@ -671,11 +659,6 @@
#define SPRN_PMC8 794
#define SPRN_SIAR 780
#define SPRN_SDAR 781
-#define SPRN_SIER 784
-#define SIER_SIPR 0x2000000 /* Sampled MSR_PR */
-#define SIER_SIHV 0x1000000 /* Sampled MSR_HV */
-#define SIER_SIAR_VALID 0x0400000 /* SIAR contents valid */
-#define SIER_SDAR_VALID 0x0200000 /* SDAR contents valid */
#define SPRN_PA6T_MMCR0 795
#define PA6T_MMCR0_EN0 0x0000000000000001UL
diff --git a/trunk/arch/powerpc/include/asm/rtas.h b/trunk/arch/powerpc/include/asm/rtas.h
index a8bc2bb4adc9..aef00c675905 100644
--- a/trunk/arch/powerpc/include/asm/rtas.h
+++ b/trunk/arch/powerpc/include/asm/rtas.h
@@ -143,8 +143,6 @@ struct rtas_suspend_me_data {
#define RTAS_TYPE_PMGM_TIME_ALARM 0x6f
#define RTAS_TYPE_PMGM_CONFIG_CHANGE 0x70
#define RTAS_TYPE_PMGM_SERVICE_PROC 0x71
-/* Platform Resource Reassignment Notification */
-#define RTAS_TYPE_PRRN 0xA0
/* RTAS check-exception vector offset */
#define RTAS_VECTOR_EXTERNAL_INTERRUPT 0x500
@@ -279,10 +277,6 @@ extern int early_init_dt_scan_rtas(unsigned long node,
extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
-#ifdef CONFIG_PPC_PSERIES
-extern int pseries_devicetree_update(s32 scope);
-#endif
-
#ifdef CONFIG_PPC_RTAS_DAEMON
extern void rtas_cancel_event_scan(void);
#else
diff --git a/trunk/arch/powerpc/include/asm/smp.h b/trunk/arch/powerpc/include/asm/smp.h
index ffbaabebcdca..195ce2ac5691 100644
--- a/trunk/arch/powerpc/include/asm/smp.h
+++ b/trunk/arch/powerpc/include/asm/smp.h
@@ -143,8 +143,6 @@ extern void __cpu_die(unsigned int cpu);
/* for UP */
#define hard_smp_processor_id() get_hard_smp_processor_id(0)
#define smp_setup_cpu_maps()
-static inline void inhibit_secondary_onlining(void) {}
-static inline void uninhibit_secondary_onlining(void) {}
#endif /* CONFIG_SMP */
diff --git a/trunk/arch/powerpc/include/asm/topology.h b/trunk/arch/powerpc/include/asm/topology.h
index 161ab662843b..852ed1b384f6 100644
--- a/trunk/arch/powerpc/include/asm/topology.h
+++ b/trunk/arch/powerpc/include/asm/topology.h
@@ -71,7 +71,6 @@ static inline void sysfs_remove_device_from_node(struct device *dev,
#if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR)
extern int start_topology_update(void);
extern int stop_topology_update(void);
-extern int prrn_is_enabled(void);
#else
static inline int start_topology_update(void)
{
@@ -81,10 +80,6 @@ static inline int stop_topology_update(void)
{
return 0;
}
-static inline int prrn_is_enabled(void)
-{
- return 0;
-}
#endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */
#include
diff --git a/trunk/arch/powerpc/include/asm/xics.h b/trunk/arch/powerpc/include/asm/xics.h
index 282d43a0c855..4ae9a09c3b89 100644
--- a/trunk/arch/powerpc/include/asm/xics.h
+++ b/trunk/arch/powerpc/include/asm/xics.h
@@ -150,7 +150,6 @@ extern void xics_register_ics(struct ics *ics);
extern void xics_teardown_cpu(void);
extern void xics_kexec_teardown_cpu(int secondary);
extern void xics_migrate_irqs_away(void);
-extern void icp_native_eoi(struct irq_data *d);
#ifdef CONFIG_SMP
extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
unsigned int strict_check);
diff --git a/trunk/arch/powerpc/include/uapi/asm/ptrace.h b/trunk/arch/powerpc/include/uapi/asm/ptrace.h
index 77d2ed35b111..66b9ca4ee94a 100644
--- a/trunk/arch/powerpc/include/uapi/asm/ptrace.h
+++ b/trunk/arch/powerpc/include/uapi/asm/ptrace.h
@@ -211,7 +211,6 @@ struct ppc_debug_info {
#define PPC_DEBUG_FEATURE_INSN_BP_MASK 0x0000000000000002
#define PPC_DEBUG_FEATURE_DATA_BP_RANGE 0x0000000000000004
#define PPC_DEBUG_FEATURE_DATA_BP_MASK 0x0000000000000008
-#define PPC_DEBUG_FEATURE_DATA_BP_DAWR 0x0000000000000010
#ifndef __ASSEMBLY__
diff --git a/trunk/arch/powerpc/kernel/cpu_setup_power.S b/trunk/arch/powerpc/kernel/cpu_setup_power.S
index e0c419b8d65b..ea847abb0d0a 100644
--- a/trunk/arch/powerpc/kernel/cpu_setup_power.S
+++ b/trunk/arch/powerpc/kernel/cpu_setup_power.S
@@ -49,7 +49,6 @@ _GLOBAL(__restore_cpu_power7)
_GLOBAL(__setup_cpu_power8)
mflr r11
bl __init_FSCR
- bl __init_PMU
bl __init_hvmode_206
mtlr r11
beqlr
@@ -58,28 +57,22 @@ _GLOBAL(__setup_cpu_power8)
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
- bl __init_HFSCR
bl __init_TLB
- bl __init_PMU_HV
mtlr r11
blr
_GLOBAL(__restore_cpu_power8)
mflr r11
bl __init_FSCR
- bl __init_PMU
mfmsr r3
rldicl. r0,r3,4,63
- mtlr r11
beqlr
li r0,0
mtspr SPRN_LPID,r0
mfspr r3,SPRN_LPCR
oris r3, r3, LPCR_AIL_3@h
bl __init_LPCR
- bl __init_HFSCR
bl __init_TLB
- bl __init_PMU_HV
mtlr r11
blr
@@ -127,12 +120,6 @@ __init_FSCR:
mtspr SPRN_FSCR,r3
blr
-__init_HFSCR:
- mfspr r3,SPRN_HFSCR
- ori r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_PM
- mtspr SPRN_HFSCR,r3
- blr
-
__init_TLB:
/* Clear the TLB */
li r6,128
@@ -144,18 +131,3 @@ __init_TLB:
bdnz 2b
ptesync
1: blr
-
-__init_PMU_HV:
- li r5,0
- mtspr SPRN_MMCRC,r5
- mtspr SPRN_MMCRH,r5
- blr
-
-__init_PMU:
- li r5,0
- mtspr SPRN_MMCRS,r5
- mtspr SPRN_MMCRA,r5
- mtspr SPRN_MMCR0,r5
- mtspr SPRN_MMCR1,r5
- mtspr SPRN_MMCR2,r5
- blr
diff --git a/trunk/arch/powerpc/kernel/dbell.c b/trunk/arch/powerpc/kernel/dbell.c
index d55c76c571f3..9ebbc24bb23c 100644
--- a/trunk/arch/powerpc/kernel/dbell.c
+++ b/trunk/arch/powerpc/kernel/dbell.c
@@ -41,8 +41,6 @@ void doorbell_exception(struct pt_regs *regs)
may_hard_irq_enable();
- __get_cpu_var(irq_stat).doorbell_irqs++;
-
smp_ipi_demux();
irq_exit();
diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S
index 04d69c4a5ac2..256c5bf0adb7 100644
--- a/trunk/arch/powerpc/kernel/entry_64.S
+++ b/trunk/arch/powerpc/kernel/entry_64.S
@@ -304,7 +304,7 @@ syscall_exit_work:
subi r12,r12,TI_FLAGS
4: /* Anything else left to do? */
- SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */
+ SET_DEFAULT_THREAD_PPR(r3, r9) /* Set thread.ppr = 3 */
andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
beq .ret_from_except_lite
@@ -657,7 +657,7 @@ resume_kernel:
/* Clear _TIF_EMULATE_STACK_STORE flag */
lis r11,_TIF_EMULATE_STACK_STORE@h
addi r5,r9,TI_FLAGS
-0: ldarx r4,0,r5
+ ldarx r4,0,r5
andc r4,r4,r11
stdcx. r4,0,r5
bne- 0b
diff --git a/trunk/arch/powerpc/kernel/exceptions-64s.S b/trunk/arch/powerpc/kernel/exceptions-64s.S
index 80d56f094a0d..56bd92362ce1 100644
--- a/trunk/arch/powerpc/kernel/exceptions-64s.S
+++ b/trunk/arch/powerpc/kernel/exceptions-64s.S
@@ -235,7 +235,6 @@ instruction_access_slb_pSeries:
.globl hardware_interrupt_hv;
hardware_interrupt_pSeries:
hardware_interrupt_hv:
- HMT_MEDIUM_PPR_DISCARD
BEGIN_FTR_SECTION
_MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
EXC_HV, SOFTEN_TEST_HV)
@@ -255,11 +254,7 @@ hardware_interrupt_hv:
STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
- . = 0x900
- .globl decrementer_pSeries
-decrementer_pSeries:
- _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
-
+ MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)
STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
@@ -802,7 +797,7 @@ hardware_interrupt_relon_hv:
_MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
FTR_SECTION_ELSE
_MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
- ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
+ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_206)
STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
@@ -875,6 +870,10 @@ tm_unavailable_relon_pSeries_1:
. = 0x5500
b denorm_exception_hv
#endif
+#ifdef CONFIG_HVC_SCOM
+ STD_RELON_EXCEPTION_HV(0x5600, 0x1600, maintence_interrupt)
+ KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1600)
+#endif /* CONFIG_HVC_SCOM */
STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)
/* Other future vectors */
diff --git a/trunk/arch/powerpc/kernel/head_64.S b/trunk/arch/powerpc/kernel/head_64.S
index b61363d557b5..0886ae6dd5be 100644
--- a/trunk/arch/powerpc/kernel/head_64.S
+++ b/trunk/arch/powerpc/kernel/head_64.S
@@ -509,7 +509,6 @@ _GLOBAL(copy_and_flush)
sync
addi r5,r5,8
addi r6,r6,8
- isync
blr
.align 8
diff --git a/trunk/arch/powerpc/kernel/iommu.c b/trunk/arch/powerpc/kernel/iommu.c
index c0d0dbddfba1..31c4fdc6859c 100644
--- a/trunk/arch/powerpc/kernel/iommu.c
+++ b/trunk/arch/powerpc/kernel/iommu.c
@@ -102,7 +102,7 @@ static int __init fail_iommu_debugfs(void)
struct dentry *dir = fault_create_debugfs_attr("fail_iommu",
NULL, &fail_iommu);
- return PTR_RET(dir);
+ return IS_ERR(dir) ? PTR_ERR(dir) : 0;
}
late_initcall(fail_iommu_debugfs);
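The change open-codes what the PTR_RET() helper provides: collapse an ERR_PTR-style pointer into 0 on success or a negative errno on failure. The standalone sketch below illustrates the ERR_PTR encoding with simplified constants; it is not the kernel's err.h:

/* Sketch of the ERR_PTR convention: a pointer in the top 4095 addresses is
 * really a negative errno in disguise. */
#include <stdio.h>

#define MAX_ERRNO 4095

static int is_err_sketch(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static int ptr_ret_sketch(const void *ptr)	/* what PTR_RET() boils down to */
{
	return is_err_sketch(ptr) ? (int)(long)ptr : 0;
}

int main(void)
{
	int x = 0;
	void *ok  = &x;
	void *bad = (void *)(long)-12;		/* an errno encoded as a pointer */

	printf("%d %d\n", ptr_ret_sketch(ok), ptr_ret_sketch(bad));	/* 0 -12 */
	return 0;
}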
diff --git a/trunk/arch/powerpc/kernel/irq.c b/trunk/arch/powerpc/kernel/irq.c
index 5cbcf4d5a808..4f97fe345526 100644
--- a/trunk/arch/powerpc/kernel/irq.c
+++ b/trunk/arch/powerpc/kernel/irq.c
@@ -374,15 +374,6 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
seq_printf(p, " Machine check exceptions\n");
-#ifdef CONFIG_PPC_DOORBELL
- if (cpu_has_feature(CPU_FTR_DBELL)) {
- seq_printf(p, "%*s: ", prec, "DBL");
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
- seq_printf(p, " Doorbell interrupts\n");
- }
-#endif
-
return 0;
}
@@ -396,9 +387,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
sum += per_cpu(irq_stat, cpu).pmu_irqs;
sum += per_cpu(irq_stat, cpu).mce_exceptions;
sum += per_cpu(irq_stat, cpu).spurious_irqs;
-#ifdef CONFIG_PPC_DOORBELL
- sum += per_cpu(irq_stat, cpu).doorbell_irqs;
-#endif
return sum;
}
diff --git a/trunk/arch/powerpc/kernel/kgdb.c b/trunk/arch/powerpc/kernel/kgdb.c
index c1eef241017a..5ca82cd4a374 100644
--- a/trunk/arch/powerpc/kernel/kgdb.c
+++ b/trunk/arch/powerpc/kernel/kgdb.c
@@ -159,7 +159,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
if (user_mode(regs))
return 0;
- backup_current_thread_info = kmalloc(sizeof(struct thread_info), GFP_KERNEL);
+ backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL);
/*
* On Book E and perhaps other processors, singlestep is handled on
* the critical exception stack. This causes current_thread_info()
diff --git a/trunk/arch/powerpc/kernel/lparcfg.c b/trunk/arch/powerpc/kernel/lparcfg.c
index ab297f492c67..f5725bce9ed2 100644
--- a/trunk/arch/powerpc/kernel/lparcfg.c
+++ b/trunk/arch/powerpc/kernel/lparcfg.c
@@ -301,7 +301,6 @@ static void parse_system_parameter_string(struct seq_file *m)
__pa(rtas_data_buf),
RTAS_DATA_BUF_SIZE);
memcpy(local_buffer, rtas_data_buf, SPLPAR_MAXLENGTH);
- local_buffer[SPLPAR_MAXLENGTH - 1] = '\0';
spin_unlock(&rtas_data_buf_lock);
if (call_status != 0) {
diff --git a/trunk/arch/powerpc/kernel/nvram_64.c b/trunk/arch/powerpc/kernel/nvram_64.c
index 48fbc2b97e95..bec1e930ed73 100644
--- a/trunk/arch/powerpc/kernel/nvram_64.c
+++ b/trunk/arch/powerpc/kernel/nvram_64.c
@@ -511,7 +511,8 @@ int __init nvram_scan_partitions(void)
"detected: 0-length partition\n");
goto out;
}
- tmp_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
+ tmp_part = (struct nvram_partition *)
+ kmalloc(sizeof(struct nvram_partition), GFP_KERNEL);
err = -ENOMEM;
if (!tmp_part) {
printk(KERN_ERR "nvram_scan_partitions: kmalloc failed\n");
diff --git a/trunk/arch/powerpc/kernel/pci-common.c b/trunk/arch/powerpc/kernel/pci-common.c
index f325dc923409..fa12ae42d98c 100644
--- a/trunk/arch/powerpc/kernel/pci-common.c
+++ b/trunk/arch/powerpc/kernel/pci-common.c
@@ -30,7 +30,6 @@
#include
#include
#include
-#include
#include
#include
@@ -1024,27 +1023,6 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
ppc_md.pci_dma_bus_setup(bus);
}
-void pcibios_setup_device(struct pci_dev *dev)
-{
- /* Fixup NUMA node as it may not be setup yet by the generic
- * code and is needed by the DMA init
- */
- set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
-
- /* Hook up default DMA ops */
- set_dma_ops(&dev->dev, pci_dma_ops);
- set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
-
- /* Additional platform DMA/iommu setup */
- if (ppc_md.pci_dma_dev_setup)
- ppc_md.pci_dma_dev_setup(dev);
-
- /* Read default IRQs and fixup if necessary */
- pci_read_irq_line(dev);
- if (ppc_md.pci_irq_fixup)
- ppc_md.pci_irq_fixup(dev);
-}
-
void pcibios_setup_bus_devices(struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -1059,7 +1037,23 @@ void pcibios_setup_bus_devices(struct pci_bus *bus)
if (dev->is_added)
continue;
- pcibios_setup_device(dev);
+ /* Fixup NUMA node as it may not be setup yet by the generic
+ * code and is needed by the DMA init
+ */
+ set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
+
+ /* Hook up default DMA ops */
+ set_dma_ops(&dev->dev, pci_dma_ops);
+ set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
+
+ /* Additional platform DMA/iommu setup */
+ if (ppc_md.pci_dma_dev_setup)
+ ppc_md.pci_dma_dev_setup(dev);
+
+ /* Read default IRQs and fixup if necessary */
+ pci_read_irq_line(dev);
+ if (ppc_md.pci_irq_fixup)
+ ppc_md.pci_irq_fixup(dev);
}
}
@@ -1500,10 +1494,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
if (ppc_md.pcibios_enable_device_hook(dev))
return -EINVAL;
- /* avoid pcie irq fix up impact on cardbus */
- if (dev->hdr_type != PCI_HEADER_TYPE_CARDBUS)
- pcibios_setup_device(dev);
-
return pci_enable_resources(dev, mask);
}
@@ -1735,15 +1725,3 @@ static void fixup_hide_host_resource_fsl(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_resource_fsl);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
-
-static void fixup_vga(struct pci_dev *pdev)
-{
- u16 cmd;
-
- pci_read_config_word(pdev, PCI_COMMAND, &cmd);
- if ((cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) || !vga_default_device())
- vga_set_default_device(pdev);
-
-}
-DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_ANY_ID, PCI_ANY_ID,
- PCI_CLASS_DISPLAY_VGA, 8, fixup_vga);
diff --git a/trunk/arch/powerpc/kernel/process.c b/trunk/arch/powerpc/kernel/process.c
index c0dea6f23567..59dd545fdde1 100644
--- a/trunk/arch/powerpc/kernel/process.c
+++ b/trunk/arch/powerpc/kernel/process.c
@@ -555,12 +555,10 @@ static inline void tm_recheckpoint_new_task(struct task_struct *new)
new->thread.regs->msr |=
(MSR_FP | new->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&new->thread);
new->thread.regs->msr |= MSR_VEC;
}
-#endif
/* We may as well turn on VSX too since all the state is restored now */
if (msr & MSR_VSX)
new->thread.regs->msr |= MSR_VSX;
@@ -912,6 +910,10 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ flush_ptrace_hw_breakpoint(src);
+#endif /* CONFIG_HAVE_HW_BREAKPOINT */
+
*dst = *src;
return 0;
}
@@ -982,10 +984,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
-#ifdef CONFIG_HAVE_HW_BREAKPOINT
- p->thread.ptrace_bps[0] = NULL;
-#endif
-
#ifdef CONFIG_PPC_STD_MMU_64
if (mmu_has_feature(MMU_FTR_SLB)) {
unsigned long sp_vsid;
diff --git a/trunk/arch/powerpc/kernel/prom_init.c b/trunk/arch/powerpc/kernel/prom_init.c
index 5eccda9fd33f..13f8d168b3f1 100644
--- a/trunk/arch/powerpc/kernel/prom_init.c
+++ b/trunk/arch/powerpc/kernel/prom_init.c
@@ -627,11 +627,16 @@ static void __init early_cmdline_parse(void)
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
- * The architecture vector has an array of PVR mask/value pairs,
- * followed by # option vectors - 1, followed by the option vectors.
- *
- * See prom.h for the definition of the bits specified in the
- * architecture vector.
+ * There are two methods for telling firmware what our capabilities are.
+ * Newer machines have an "ibm,client-architecture-support" method on the
+ * root node. For older machines, we have to call the "process-elf-header"
+ * method in the /packages/elf-loader node, passing it a fake 32-bit
+ * ELF header containing a couple of PT_NOTE sections that contain
+ * structures that contain various information.
+ */
+
+/*
+ * New method - extensible architecture description vector.
*
* Because the description vector contains a mix of byte and word
* values, we declare it as an unsigned char array, and use this
@@ -640,7 +645,65 @@ static void __init early_cmdline_parse(void)
#define W(x) ((x) >> 24) & 0xff, ((x) >> 16) & 0xff, \
((x) >> 8) & 0xff, (x) & 0xff
-unsigned char ibm_architecture_vec[] = {
+/* Option vector bits - generic bits in byte 1 */
+#define OV_IGNORE 0x80 /* ignore this vector */
+#define OV_CESSATION_POLICY 0x40 /* halt if unsupported option present*/
+
+/* Option vector 1: processor architectures supported */
+#define OV1_PPC_2_00 0x80 /* set if we support PowerPC 2.00 */
+#define OV1_PPC_2_01 0x40 /* set if we support PowerPC 2.01 */
+#define OV1_PPC_2_02 0x20 /* set if we support PowerPC 2.02 */
+#define OV1_PPC_2_03 0x10 /* set if we support PowerPC 2.03 */
+#define OV1_PPC_2_04 0x08 /* set if we support PowerPC 2.04 */
+#define OV1_PPC_2_05 0x04 /* set if we support PowerPC 2.05 */
+#define OV1_PPC_2_06 0x02 /* set if we support PowerPC 2.06 */
+#define OV1_PPC_2_07 0x01 /* set if we support PowerPC 2.07 */
+
+/* Option vector 2: Open Firmware options supported */
+#define OV2_REAL_MODE 0x20 /* set if we want OF in real mode */
+
+/* Option vector 3: processor options supported */
+#define OV3_FP 0x80 /* floating point */
+#define OV3_VMX 0x40 /* VMX/Altivec */
+#define OV3_DFP 0x20 /* decimal FP */
+
+/* Option vector 4: IBM PAPR implementation */
+#define OV4_MIN_ENT_CAP 0x01 /* minimum VP entitled capacity */
+
+/* Option vector 5: PAPR/OF options supported */
+#define OV5_LPAR 0x80 /* logical partitioning supported */
+#define OV5_SPLPAR 0x40 /* shared-processor LPAR supported */
+/* ibm,dynamic-reconfiguration-memory property supported */
+#define OV5_DRCONF_MEMORY 0x20
+#define OV5_LARGE_PAGES 0x10 /* large pages supported */
+#define OV5_DONATE_DEDICATE_CPU 0x02 /* donate dedicated CPU support */
+/* PCIe/MSI support. Without MSI full PCIe is not supported */
+#ifdef CONFIG_PCI_MSI
+#define OV5_MSI 0x01 /* PCIe/MSI support */
+#else
+#define OV5_MSI 0x00
+#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_PPC_SMLPAR
+#define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */
+#define OV5_XCMO 0x40 /* Page Coalescing */
+#else
+#define OV5_CMO 0x00
+#define OV5_XCMO 0x00
+#endif
+#define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */
+#define OV5_PFO_HW_RNG 0x80 /* PFO Random Number Generator */
+#define OV5_PFO_HW_842 0x40 /* PFO Compression Accelerator */
+#define OV5_PFO_HW_ENCR 0x20 /* PFO Encryption Accelerator */
+#define OV5_SUB_PROCESSORS 0x01 /* 1,2,or 4 Sub-Processors supported */
+
+/* Option Vector 6: IBM PAPR hints */
+#define OV6_LINUX 0x02 /* Linux is our OS */
+
+/*
+ * The architecture vector has an array of PVR mask/value pairs,
+ * followed by # option vectors - 1, followed by the option vectors.
+ */
+static unsigned char ibm_architecture_vec[] = {
W(0xfffe0000), W(0x003a0000), /* POWER5/POWER5+ */
W(0xffff0000), W(0x003e0000), /* POWER6 */
W(0xffff0000), W(0x003f0000), /* POWER7 */
@@ -684,21 +747,11 @@ unsigned char ibm_architecture_vec[] = {
/* option vector 5: PAPR/OF options */
19 - 2, /* length */
0, /* don't ignore, don't halt */
- OV5_FEAT(OV5_LPAR) | OV5_FEAT(OV5_SPLPAR) | OV5_FEAT(OV5_LARGE_PAGES) |
- OV5_FEAT(OV5_DRCONF_MEMORY) | OV5_FEAT(OV5_DONATE_DEDICATE_CPU) |
-#ifdef CONFIG_PCI_MSI
- /* PCIe/MSI support. Without MSI full PCIe is not supported */
- OV5_FEAT(OV5_MSI),
-#else
+ OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY |
+ OV5_DONATE_DEDICATE_CPU | OV5_MSI,
0,
-#endif
- 0,
-#ifdef CONFIG_PPC_SMLPAR
- OV5_FEAT(OV5_CMO) | OV5_FEAT(OV5_XCMO),
-#else
- 0,
-#endif
- OV5_FEAT(OV5_TYPE1_AFFINITY) | OV5_FEAT(OV5_PRRN),
+ OV5_CMO | OV5_XCMO,
+ OV5_TYPE1_AFFINITY,
0,
0,
0,
@@ -712,9 +765,8 @@ unsigned char ibm_architecture_vec[] = {
0,
0,
0,
- OV5_FEAT(OV5_PFO_HW_RNG) | OV5_FEAT(OV5_PFO_HW_ENCR) |
- OV5_FEAT(OV5_PFO_HW_842),
- OV5_FEAT(OV5_SUB_PROCESSORS),
+ OV5_PFO_HW_RNG | OV5_PFO_HW_ENCR | OV5_PFO_HW_842,
+ OV5_SUB_PROCESSORS,
/* option vector 6: IBM PAPR hints */
4 - 2, /* length */
0,
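The prom.h constants removed earlier pack a byte index and a feature bit into one 16-bit OV5_* value, and OV5_FEAT()/OV5_INDX() split them back apart; the hunk above switches prom_init.c between that packed form and plain per-byte values. A small standalone check of the packed encoding, reusing the macros and two values shown in the removed prom.h block:

/* Sketch: split a packed option-vector-5 define into (byte index, feature bit). */
#include <stdio.h>

#define OV5_FEAT(x)		((x) & 0xff)
#define OV5_INDX(x)		((x) >> 8)

#define OV5_DRCONF_MEMORY	0x0220	/* byte 2 of vector 5, bit 0x20 */
#define OV5_SUB_PROCESSORS	0x0F01	/* byte 15, bit 0x01 */

int main(void)
{
	printf("DRCONF_MEMORY:  index %d, feature bit 0x%02x\n",
	       OV5_INDX(OV5_DRCONF_MEMORY), OV5_FEAT(OV5_DRCONF_MEMORY));
	printf("SUB_PROCESSORS: index %d, feature bit 0x%02x\n",
	       OV5_INDX(OV5_SUB_PROCESSORS), OV5_FEAT(OV5_SUB_PROCESSORS));
	return 0;
}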
diff --git a/trunk/arch/powerpc/kernel/ptrace.c b/trunk/arch/powerpc/kernel/ptrace.c
index 3b14d320e69f..f9b30c68ba47 100644
--- a/trunk/arch/powerpc/kernel/ptrace.c
+++ b/trunk/arch/powerpc/kernel/ptrace.c
@@ -180,10 +180,9 @@ static int set_user_msr(struct task_struct *task, unsigned long msr)
}
#ifdef CONFIG_PPC64
-static int get_user_dscr(struct task_struct *task, unsigned long *data)
+static unsigned long get_user_dscr(struct task_struct *task)
{
- *data = task->thread.dscr;
- return 0;
+ return task->thread.dscr;
}
static int set_user_dscr(struct task_struct *task, unsigned long dscr)
@@ -193,7 +192,7 @@ static int set_user_dscr(struct task_struct *task, unsigned long dscr)
return 0;
}
#else
-static int get_user_dscr(struct task_struct *task, unsigned long *data)
+static unsigned long get_user_dscr(struct task_struct *task)
{
return -EIO;
}
@@ -217,23 +216,19 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
/*
* Get contents of register REGNO in task TASK.
*/
-int ptrace_get_reg(struct task_struct *task, int regno, unsigned long *data)
+unsigned long ptrace_get_reg(struct task_struct *task, int regno)
{
- if ((task->thread.regs == NULL) || !data)
+ if (task->thread.regs == NULL)
return -EIO;
- if (regno == PT_MSR) {
- *data = get_user_msr(task);
- return 0;
- }
+ if (regno == PT_MSR)
+ return get_user_msr(task);
if (regno == PT_DSCR)
- return get_user_dscr(task, data);
+ return get_user_dscr(task);
- if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long))) {
- *data = ((unsigned long *)task->thread.regs)[regno];
- return 0;
- }
+ if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
+ return ((unsigned long *)task->thread.regs)[regno];
return -EIO;
}
@@ -1565,9 +1560,7 @@ long arch_ptrace(struct task_struct *child, long request,
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
- ret = ptrace_get_reg(child, (int) index, &tmp);
- if (ret)
- break;
+ tmp = ptrace_get_reg(child, (int) index);
} else {
unsigned int fpidx = index - PT_FPR0;
@@ -1644,8 +1637,6 @@ long arch_ptrace(struct task_struct *child, long request,
dbginfo.sizeof_condition = 0;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
dbginfo.features = PPC_DEBUG_FEATURE_DATA_BP_RANGE;
- if (cpu_has_feature(CPU_FTR_DAWR))
- dbginfo.features |= PPC_DEBUG_FEATURE_DATA_BP_DAWR;
#else
dbginfo.features = 0;
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
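The ptrace changes above move ptrace_get_reg() from an out-parameter style (an int status plus *data) to returning the register value directly, so an error code can collide with a register that genuinely holds the same bit pattern. The sketch below contrasts the two calling conventions with hypothetical helpers, not the kernel functions:

/* Sketch: out-parameter style vs. value-return style for a register read. */
#include <errno.h>

static unsigned long fake_regs[32];

static int read_reg_v1(int regno, unsigned long *data)	/* status + out-param */
{
	if (regno < 0 || regno >= 32)
		return -EIO;
	*data = fake_regs[regno];
	return 0;
}

static unsigned long read_reg_v2(int regno)		/* value doubles as status */
{
	if (regno < 0 || regno >= 32)
		return -EIO;	/* indistinguishable from a register holding 0xff..f2 */
	return fake_regs[regno];
}

int main(void)
{
	unsigned long val = 0;
	int err = read_reg_v1(3, &val);

	return (err == 0 && val == read_reg_v2(3)) ? 0 : 1;
}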
diff --git a/trunk/arch/powerpc/kernel/ptrace32.c b/trunk/arch/powerpc/kernel/ptrace32.c
index f51599e941c7..c0244e766834 100644
--- a/trunk/arch/powerpc/kernel/ptrace32.c
+++ b/trunk/arch/powerpc/kernel/ptrace32.c
@@ -95,9 +95,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
CHECK_FULL_REGS(child->thread.regs);
if (index < PT_FPR0) {
- ret = ptrace_get_reg(child, index, &tmp);
- if (ret)
- break;
+ tmp = ptrace_get_reg(child, index);
} else {
flush_fp_to_thread(child);
/*
@@ -150,11 +148,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
tmp = ((u64 *)child->thread.fpr)
[FPRINDEX_3264(numReg)];
} else { /* register within PT_REGS struct */
- unsigned long tmp2;
- ret = ptrace_get_reg(child, numReg, &tmp2);
- if (ret)
- break;
- tmp = tmp2;
+ tmp = ptrace_get_reg(child, numReg);
}
reg32bits = ((u32*)&tmp)[part];
ret = put_user(reg32bits, (u32 __user *)data);
@@ -238,10 +232,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
break;
CHECK_FULL_REGS(child->thread.regs);
if (numReg < PT_FPR0) {
- unsigned long freg;
- ret = ptrace_get_reg(child, numReg, &freg);
- if (ret)
- break;
+ unsigned long freg = ptrace_get_reg(child, numReg);
if (index % 2)
freg = (freg & ~0xfffffffful) | (data & 0xfffffffful);
else
diff --git a/trunk/arch/powerpc/kernel/rtas_flash.c b/trunk/arch/powerpc/kernel/rtas_flash.c
index a3e4034b6843..c642f0132988 100644
--- a/trunk/arch/powerpc/kernel/rtas_flash.c
+++ b/trunk/arch/powerpc/kernel/rtas_flash.c
@@ -57,31 +57,13 @@
#define VALIDATE_READY -1001 /* Firmware image ready for validation */
#define VALIDATE_PARAM_ERR -3 /* RTAS Parameter Error */
#define VALIDATE_HW_ERR -1 /* RTAS Hardware Error */
-
-/* ibm,validate-flash-image update result tokens */
-#define VALIDATE_TMP_UPDATE 0 /* T side will be updated */
-#define VALIDATE_FLASH_AUTH 1 /* Partition does not have authority */
-#define VALIDATE_INVALID_IMG 2 /* Candidate image is not valid */
-#define VALIDATE_CUR_UNKNOWN 3 /* Current fixpack level is unknown */
-/*
- * Current T side will be committed to P side before being replace with new
- * image, and the new image is downlevel from current image
- */
-#define VALIDATE_TMP_COMMIT_DL 4
-/*
- * Current T side will be committed to P side before being replaced with new
- * image
- */
-#define VALIDATE_TMP_COMMIT 5
-/*
- * T side will be updated with a downlevel image
- */
-#define VALIDATE_TMP_UPDATE_DL 6
-/*
- * The candidate image's release date is later than the system's firmware
- * service entitlement date - service warranty period has expired
- */
-#define VALIDATE_OUT_OF_WRNTY 7
+#define VALIDATE_TMP_UPDATE 0 /* Validate Return Status */
+#define VALIDATE_FLASH_AUTH 1 /* Validate Return Status */
+#define VALIDATE_INVALID_IMG 2 /* Validate Return Status */
+#define VALIDATE_CUR_UNKNOWN 3 /* Validate Return Status */
+#define VALIDATE_TMP_COMMIT_DL 4 /* Validate Return Status */
+#define VALIDATE_TMP_COMMIT 5 /* Validate Return Status */
+#define VALIDATE_TMP_UPDATE_DL 6 /* Validate Return Status */
/* ibm,manage-flash-image operation tokens */
#define RTAS_REJECT_TMP_IMG 0
@@ -808,11 +790,6 @@ static void __exit rtas_flash_cleanup(void)
{
rtas_flash_term_hook = NULL;
- if (rtas_firmware_flash_list) {
- free_flash_list(rtas_firmware_flash_list);
- rtas_firmware_flash_list = NULL;
- }
-
if (flash_block_cache)
kmem_cache_destroy(flash_block_cache);
diff --git a/trunk/arch/powerpc/kernel/rtas_pci.c b/trunk/arch/powerpc/kernel/rtas_pci.c
index 6e7b7cdeec65..71cb20d6ec61 100644
--- a/trunk/arch/powerpc/kernel/rtas_pci.c
+++ b/trunk/arch/powerpc/kernel/rtas_pci.c
@@ -201,7 +201,7 @@ static void python_countermeasures(struct device_node *dev)
iounmap(chip_regs);
}
-void __init init_pci_config_tokens(void)
+void __init init_pci_config_tokens (void)
{
read_pci_config = rtas_token("read-pci-config");
write_pci_config = rtas_token("write-pci-config");
@@ -209,7 +209,7 @@ void __init init_pci_config_tokens(void)
ibm_write_pci_config = rtas_token("ibm,write-pci-config");
}
-unsigned long get_phb_buid(struct device_node *phb)
+unsigned long get_phb_buid (struct device_node *phb)
{
struct resource r;
diff --git a/trunk/arch/powerpc/kernel/rtasd.c b/trunk/arch/powerpc/kernel/rtasd.c
index 1130c53ad652..1045ff49cc6d 100644
--- a/trunk/arch/powerpc/kernel/rtasd.c
+++ b/trunk/arch/powerpc/kernel/rtasd.c
@@ -29,7 +29,6 @@
#include
#include
#include
-#include
static DEFINE_SPINLOCK(rtasd_log_lock);
@@ -88,8 +87,6 @@ static char *rtas_event_type(int type)
return "Resource Deallocation Event";
case RTAS_TYPE_DUMP:
return "Dump Notification Event";
- case RTAS_TYPE_PRRN:
- return "Platform Resource Reassignment Event";
}
return rtas_type[0];
@@ -268,51 +265,9 @@ void pSeries_log_error(char *buf, unsigned int err_type, int fatal)
spin_unlock_irqrestore(&rtasd_log_lock, s);
return;
}
-}
-
-#ifdef CONFIG_PPC_PSERIES
-static s32 prrn_update_scope;
-static void prrn_work_fn(struct work_struct *work)
-{
- /*
- * For PRRN, we must pass the negative of the scope value in
- * the RTAS event.
- */
- pseries_devicetree_update(-prrn_update_scope);
}
-static DECLARE_WORK(prrn_work, prrn_work_fn);
-
-void prrn_schedule_update(u32 scope)
-{
- flush_work(&prrn_work);
- prrn_update_scope = scope;
- schedule_work(&prrn_work);
-}
-
-static void handle_rtas_event(const struct rtas_error_log *log)
-{
- if (log->type == RTAS_TYPE_PRRN) {
- /* For PRRN Events the extended log length is used to denote
- * the scope for calling rtas update-nodes.
- */
- if (prrn_is_enabled())
- prrn_schedule_update(log->extended_log_length);
- }
-
- return;
-}
-
-#else
-
-static void handle_rtas_event(const struct rtas_error_log *log)
-{
- return;
-}
-
-#endif
-
static int rtas_log_open(struct inode * inode, struct file * file)
{
return 0;
@@ -433,10 +388,8 @@ static void do_event_scan(void)
break;
}
- if (error == 0) {
+ if (error == 0)
pSeries_log_error(logdata, ERR_TYPE_RTAS_LOG, 0);
- handle_rtas_event((struct rtas_error_log *)logdata);
- }
} while(error == 0);
}
diff --git a/trunk/arch/powerpc/kernel/setup-common.c b/trunk/arch/powerpc/kernel/setup-common.c
index 63d051f5b7a5..bdc499c17872 100644
--- a/trunk/arch/powerpc/kernel/setup-common.c
+++ b/trunk/arch/powerpc/kernel/setup-common.c
@@ -621,6 +621,12 @@ int check_legacy_ioport(unsigned long base_port)
case FDC_BASE: /* FDC1 */
np = of_find_node_by_type(NULL, "fdc");
break;
+#ifdef CONFIG_PPC_PREP
+ case _PIDXR:
+ case _PNPWRP:
+ case PNPBIOS_BASE:
+ /* implement me */
+#endif
default:
/* ipmi is supposed to fail here */
break;
diff --git a/trunk/arch/powerpc/kernel/signal_32.c b/trunk/arch/powerpc/kernel/signal_32.c
index 95068bf569ad..3acb28e245b4 100644
--- a/trunk/arch/powerpc/kernel/signal_32.c
+++ b/trunk/arch/powerpc/kernel/signal_32.c
@@ -866,12 +866,10 @@ static long restore_tm_user_regs(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
-#endif
return 0;
}
diff --git a/trunk/arch/powerpc/kernel/signal_64.c b/trunk/arch/powerpc/kernel/signal_64.c
index c1794286098c..995f8543cb57 100644
--- a/trunk/arch/powerpc/kernel/signal_64.c
+++ b/trunk/arch/powerpc/kernel/signal_64.c
@@ -522,12 +522,10 @@ static long restore_tm_sigcontexts(struct pt_regs *regs,
do_load_up_transact_fpu(&current->thread);
regs->msr |= (MSR_FP | current->thread.fpexc_mode);
}
-#ifdef CONFIG_ALTIVEC
if (msr & MSR_VEC) {
do_load_up_transact_altivec(&current->thread);
regs->msr |= MSR_VEC;
}
-#endif
return err;
}
diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c
index 5fc29ad7e26f..f77fa22754bc 100644
--- a/trunk/arch/powerpc/kernel/time.c
+++ b/trunk/arch/powerpc/kernel/time.c
@@ -1049,8 +1049,10 @@ static int __init rtc_init(void)
return -ENODEV;
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
- return PTR_RET(pdev);
+ return 0;
}
module_init(rtc_init);
diff --git a/trunk/arch/powerpc/kernel/tm.S b/trunk/arch/powerpc/kernel/tm.S
index 2da67e7a16d5..84dbace657ce 100644
--- a/trunk/arch/powerpc/kernel/tm.S
+++ b/trunk/arch/powerpc/kernel/tm.S
@@ -309,7 +309,6 @@ _GLOBAL(tm_recheckpoint)
or r5, r6, r5 /* Set MSR.FP+.VSX/.VEC */
mtmsr r5
-#ifdef CONFIG_ALTIVEC
/* FP and VEC registers: These are recheckpointed from thread.fpr[]
* and thread.vr[] respectively. The thread.transact_fpr[] version
* is more modern, and will be loaded subsequently by any FPUnavailable
@@ -324,7 +323,6 @@ _GLOBAL(tm_recheckpoint)
REST_32VRS(0, r5, r3) /* r5 scratch, r3 THREAD ptr */
ld r5, THREAD_VRSAVE(r3)
mtspr SPRN_VRSAVE, r5
-#endif
dont_restore_vec:
andi. r0, r4, MSR_FP
diff --git a/trunk/arch/powerpc/kernel/vdso.c b/trunk/arch/powerpc/kernel/vdso.c
index d4f463ac65b1..1b2076f049ce 100644
--- a/trunk/arch/powerpc/kernel/vdso.c
+++ b/trunk/arch/powerpc/kernel/vdso.c
@@ -113,10 +113,6 @@ static struct vdso_patch_def vdso_patches[] = {
CPU_FTR_USE_TB, 0,
"__kernel_get_tbfreq", NULL
},
- {
- CPU_FTR_USE_TB, 0,
- "__kernel_time", NULL
- },
};
/*
diff --git a/trunk/arch/powerpc/kernel/vdso32/gettimeofday.S b/trunk/arch/powerpc/kernel/vdso32/gettimeofday.S
index 27e2f623210b..4ee09ee2e836 100644
--- a/trunk/arch/powerpc/kernel/vdso32/gettimeofday.S
+++ b/trunk/arch/powerpc/kernel/vdso32/gettimeofday.S
@@ -180,32 +180,6 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
V_FUNCTION_END(__kernel_clock_getres)
-/*
- * Exact prototype of time()
- *
- * time_t time(time *t);
- *
- */
-V_FUNCTION_BEGIN(__kernel_time)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
-
- mr r11,r3 /* r11 holds t */
- bl __get_datapage@local
- mr r9, r3 /* datapage ptr in r9 */
-
- lwz r3,STAMP_XTIME+TSPEC_TV_SEC(r9)
-
- cmplwi r11,0 /* check if t is NULL */
- beq 2f
- stw r3,0(r11) /* store result at *t */
-2: mtlr r12
- crclr cr0*4+so
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_time)
-
/*
* This is the core of clock_gettime() and gettimeofday(),
* it returns the current time in r3 (seconds) and r4.
diff --git a/trunk/arch/powerpc/kernel/vdso32/vdso32.lds.S b/trunk/arch/powerpc/kernel/vdso32/vdso32.lds.S
index f223409629b9..43200ba2e570 100644
--- a/trunk/arch/powerpc/kernel/vdso32/vdso32.lds.S
+++ b/trunk/arch/powerpc/kernel/vdso32/vdso32.lds.S
@@ -150,7 +150,6 @@ VERSION
#ifdef CONFIG_PPC64
__kernel_getcpu;
#endif
- __kernel_time;
local: *;
};
diff --git a/trunk/arch/powerpc/kernel/vdso64/gettimeofday.S b/trunk/arch/powerpc/kernel/vdso64/gettimeofday.S
index a76b4af37ef2..e97a9a0dc4ac 100644
--- a/trunk/arch/powerpc/kernel/vdso64/gettimeofday.S
+++ b/trunk/arch/powerpc/kernel/vdso64/gettimeofday.S
@@ -164,32 +164,6 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
.cfi_endproc
V_FUNCTION_END(__kernel_clock_getres)
-/*
- * Exact prototype of time()
- *
- * time_t time(time *t);
- *
- */
-V_FUNCTION_BEGIN(__kernel_time)
- .cfi_startproc
- mflr r12
- .cfi_register lr,r12
-
- mr r11,r3 /* r11 holds t */
- bl V_LOCAL_FUNC(__get_datapage)
-
- ld r4,STAMP_XTIME+TSPC64_TV_SEC(r3)
-
- cmpldi r11,0 /* check if t is NULL */
- beq 2f
- std r4,0(r11) /* store result at *t */
-2: mtlr r12
- crclr cr0*4+so
- mr r3,r4
- blr
- .cfi_endproc
-V_FUNCTION_END(__kernel_time)
-
/*
* This is the core of clock_gettime() and gettimeofday(),
diff --git a/trunk/arch/powerpc/kernel/vdso64/vdso64.lds.S b/trunk/arch/powerpc/kernel/vdso64/vdso64.lds.S
index e4863819663b..e6c1758f3588 100644
--- a/trunk/arch/powerpc/kernel/vdso64/vdso64.lds.S
+++ b/trunk/arch/powerpc/kernel/vdso64/vdso64.lds.S
@@ -147,7 +147,6 @@ VERSION
__kernel_sync_dicache_p5;
__kernel_sigtramp_rt64;
__kernel_getcpu;
- __kernel_time;
local: *;
};
diff --git a/trunk/arch/powerpc/kvm/book3s_hv_interrupts.S b/trunk/arch/powerpc/kvm/book3s_hv_interrupts.S
index 37f1cc417ca0..84035a528c80 100644
--- a/trunk/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/trunk/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -122,16 +122,11 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
add r8,r8,r7
std r8,HSTATE_DECEXP(r13)
-#ifdef CONFIG_SMP
/*
* On PPC970, if the guest vcpu has an external interrupt pending,
* send ourselves an IPI so as to interrupt the guest once it
* enables interrupts. (It must have interrupts disabled,
* otherwise we would already have delivered the interrupt.)
- *
- * XXX If this is a UP build, smp_send_reschedule is not available,
- * so the interrupt will be delayed until the next time the vcpu
- * enters the guest with interrupts enabled.
*/
BEGIN_FTR_SECTION
ld r0, VCPU_PENDING_EXC(r4)
@@ -146,7 +141,6 @@ BEGIN_FTR_SECTION
mr r4, r31
32:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-#endif /* CONFIG_SMP */
/* Jump to partition switch code */
bl .kvmppc_hv_entry_trampoline
diff --git a/trunk/arch/powerpc/kvm/book3s_pr.c b/trunk/arch/powerpc/kvm/book3s_pr.c
index dbdc15aa8127..5e93438afb06 100644
--- a/trunk/arch/powerpc/kvm/book3s_pr.c
+++ b/trunk/arch/powerpc/kvm/book3s_pr.c
@@ -1039,7 +1039,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
if (!vcpu_book3s)
goto out;
- vcpu_book3s->shadow_vcpu =
+ vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
if (!vcpu_book3s->shadow_vcpu)
goto free_vcpu;
diff --git a/trunk/arch/powerpc/kvm/e500.h b/trunk/arch/powerpc/kvm/e500.h
index 33db48a8ce24..41cefd43655f 100644
--- a/trunk/arch/powerpc/kvm/e500.h
+++ b/trunk/arch/powerpc/kvm/e500.h
@@ -26,20 +26,17 @@
#define E500_PID_NUM 3
#define E500_TLB_NUM 2
-/* entry is mapped somewhere in host TLB */
-#define E500_TLB_VALID (1 << 0)
-/* TLB1 entry is mapped by host TLB1, tracked by bitmaps */
-#define E500_TLB_BITMAP (1 << 1)
-/* TLB1 entry is mapped by host TLB0 */
+#define E500_TLB_VALID 1
+#define E500_TLB_BITMAP 2
#define E500_TLB_TLB0 (1 << 2)
struct tlbe_ref {
- pfn_t pfn; /* valid only for TLB0, except briefly */
- unsigned int flags; /* E500_TLB_* */
+ pfn_t pfn;
+ unsigned int flags; /* E500_TLB_* */
};
struct tlbe_priv {
- struct tlbe_ref ref;
+ struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
};
#ifdef CONFIG_KVM_E500V2
@@ -66,6 +63,17 @@ struct kvmppc_vcpu_e500 {
unsigned int gtlb_nv[E500_TLB_NUM];
+ /*
+ * information associated with each host TLB entry --
+ * TLB1 only for now. If/when guest TLB1 entries can be
+ * mapped with host TLB0, this will be used for that too.
+ *
+ * We don't want to use this for guest TLB0 because then we'd
+ * have the overhead of doing the translation again even if
+ * the entry is still in the guest TLB (e.g. we swapped out
+ * and back, and our host TLB entries got evicted).
+ */
+ struct tlbe_ref *tlb_refs[E500_TLB_NUM];
unsigned int host_tlb1_nv;
u32 svr;
diff --git a/trunk/arch/powerpc/kvm/e500_mmu_host.c b/trunk/arch/powerpc/kvm/e500_mmu_host.c
index 1c6a9d729df4..a222edfb9a9b 100644
--- a/trunk/arch/powerpc/kvm/e500_mmu_host.c
+++ b/trunk/arch/powerpc/kvm/e500_mmu_host.c
@@ -193,11 +193,8 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
/* Don't bother with unmapped entries */
- if (!(ref->flags & E500_TLB_VALID)) {
- WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
- "%s: flags %x\n", __func__, ref->flags);
- WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
- }
+ if (!(ref->flags & E500_TLB_VALID))
+ return;
if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
@@ -251,7 +248,7 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
pfn_t pfn)
{
ref->pfn = pfn;
- ref->flags |= E500_TLB_VALID;
+ ref->flags = E500_TLB_VALID;
if (tlbe_is_writable(gtlbe))
kvm_set_pfn_dirty(pfn);
@@ -260,7 +257,6 @@ static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
if (ref->flags & E500_TLB_VALID) {
- /* FIXME: don't log bogus pfn for TLB1 */
trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
ref->flags = 0;
}
@@ -278,23 +274,36 @@ static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- int tlbsel;
+ int tlbsel = 0;
int i;
- for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->gtlb_priv[tlbsel][i].ref;
+ kvmppc_e500_ref_release(ref);
}
}
-void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
- struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ int stlbsel = 1;
+ int i;
+
kvmppc_e500_tlbil_all(vcpu_e500);
+
+ for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
+ struct tlbe_ref *ref =
+ &vcpu_e500->tlb_refs[stlbsel][i];
+ kvmppc_e500_ref_release(ref);
+ }
+
clear_tlb_privs(vcpu_e500);
+}
+
+void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
+{
+ struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
+ clear_tlb_refs(vcpu_e500);
clear_tlb1_bitmap(vcpu_e500);
}
@@ -449,6 +458,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
}
+ /* Drop old ref and setup new one. */
+ kvmppc_e500_ref_release(ref);
kvmppc_e500_ref_setup(ref, gtlbe, pfn);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
@@ -496,15 +507,14 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
vcpu_e500->host_tlb1_nv = 0;
+ vcpu_e500->tlb_refs[1][sesel] = *ref;
+ vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
+ vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
- unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
+ unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
}
-
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
- vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
- vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
- WARN_ON(!(ref->flags & E500_TLB_VALID));
+ vcpu_e500->h2g_tlb1_rmap[sesel] = esel;
return sesel;
}
@@ -516,12 +526,13 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
- struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
+ struct tlbe_ref ref;
int sesel;
int r;
+ ref.flags = 0;
r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
- ref);
+ &ref);
if (r)
return r;
@@ -533,7 +544,7 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
/* Otherwise map into TLB1 */
- sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
+ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, &ref, esel);
write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
return 0;
@@ -554,7 +565,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
case 0:
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
- /* Triggers after clear_tlb_privs or on initial mapping */
+ /* Triggers after clear_tlb_refs or on initial mapping */
if (!(priv->ref.flags & E500_TLB_VALID)) {
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
} else {
@@ -654,16 +665,35 @@ int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
host_tlb_params[0].entries / host_tlb_params[0].ways;
host_tlb_params[1].sets = 1;
+ vcpu_e500->tlb_refs[0] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[0])
+ goto err;
+
+ vcpu_e500->tlb_refs[1] =
+ kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
+ GFP_KERNEL);
+ if (!vcpu_e500->tlb_refs[1])
+ goto err;
+
vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
host_tlb_params[1].entries,
GFP_KERNEL);
if (!vcpu_e500->h2g_tlb1_rmap)
- return -EINVAL;
+ goto err;
return 0;
+
+err:
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
+ return -EINVAL;
}
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
kfree(vcpu_e500->h2g_tlb1_rmap);
+ kfree(vcpu_e500->tlb_refs[0]);
+ kfree(vcpu_e500->tlb_refs[1]);
}
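e500_mmu_host_init() above allocates the two tlb_refs arrays and unwinds through a single err label; because kfree(NULL) is a no-op, both pointers can be freed there regardless of which allocation failed, assuming the containing vcpu structure starts out zeroed. A userspace sketch of the same pattern, with calloc/free standing in for kzalloc/kfree:

#include <stdlib.h>

struct tlbe_ref {
        unsigned long pfn;
        unsigned int flags;
};

static int host_init(struct tlbe_ref *refs[2], size_t n0, size_t n1)
{
        refs[0] = refs[1] = NULL;       /* mirrors the zero-initialised vcpu struct */

        refs[0] = calloc(n0, sizeof(struct tlbe_ref));
        if (!refs[0])
                goto err;

        refs[1] = calloc(n1, sizeof(struct tlbe_ref));
        if (!refs[1])
                goto err;

        return 0;

err:
        free(refs[0]);                  /* free(NULL), like kfree(NULL), is a no-op */
        free(refs[1]);
        refs[0] = refs[1] = NULL;
        return -1;                      /* the kernel code returns -EINVAL */
}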
diff --git a/trunk/arch/powerpc/kvm/e500mc.c b/trunk/arch/powerpc/kvm/e500mc.c
index 2f4baa074b2e..1f89d26e65fb 100644
--- a/trunk/arch/powerpc/kvm/e500mc.c
+++ b/trunk/arch/powerpc/kvm/e500mc.c
@@ -108,8 +108,6 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
-static DEFINE_PER_CPU(struct kvm_vcpu *, last_vcpu_on_cpu);
-
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -138,11 +136,8 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
- if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
- __get_cpu_var(last_vcpu_on_cpu) != vcpu) {
+ if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
kvmppc_e500_tlbil_all(vcpu_e500);
- __get_cpu_var(last_vcpu_on_cpu) = vcpu;
- }
kvmppc_load_guest_fp(vcpu);
}
diff --git a/trunk/arch/powerpc/mm/hash_utils_64.c b/trunk/arch/powerpc/mm/hash_utils_64.c
index 1ed4419c533b..f410c3e12c1e 100644
--- a/trunk/arch/powerpc/mm/hash_utils_64.c
+++ b/trunk/arch/powerpc/mm/hash_utils_64.c
@@ -1230,60 +1230,24 @@ void low_hash_fault(struct pt_regs *regs, unsigned long address, int rc)
bad_page_fault(regs, address, SIGBUS);
}
-long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
- unsigned long pa, unsigned long rflags,
- unsigned long vflags, int psize, int ssize)
-{
- unsigned long hpte_group;
- long slot;
-
-repeat:
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
-
- /* Insert into the hash table, primary slot */
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, vflags,
- psize, ssize);
-
- /* Primary is full, try the secondary */
- if (unlikely(slot == -1)) {
- hpte_group = ((~hash & htab_hash_mask) *
- HPTES_PER_GROUP) & ~0x7UL;
- slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
- vflags | HPTE_V_SECONDARY,
- psize, ssize);
- if (slot == -1) {
- if (mftb() & 0x1)
- hpte_group = ((hash & htab_hash_mask) *
- HPTES_PER_GROUP)&~0x7UL;
-
- ppc_md.hpte_remove(hpte_group);
- goto repeat;
- }
- }
-
- return slot;
-}
-
#ifdef CONFIG_DEBUG_PAGEALLOC
static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
{
- unsigned long hash;
+ unsigned long hash, hpteg;
unsigned long vsid = get_kernel_vsid(vaddr, mmu_kernel_ssize);
unsigned long vpn = hpt_vpn(vaddr, vsid, mmu_kernel_ssize);
unsigned long mode = htab_convert_pte_flags(PAGE_KERNEL);
- long ret;
+ int ret;
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
+ hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Don't create HPTE entries for bad address */
if (!vsid)
return;
-
- ret = hpte_insert_repeating(hash, vpn, __pa(vaddr), mode,
- HPTE_V_BOLTED,
- mmu_linear_psize, mmu_kernel_ssize);
-
+ ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
+ mode, HPTE_V_BOLTED,
+ mmu_linear_psize, mmu_kernel_ssize);
BUG_ON (ret < 0);
spin_lock(&linear_map_hash_lock);
BUG_ON(linear_map_hash_slots[lmi] & 0x80);
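The removed hpte_insert_repeating() above (and the open-coded copy that reappears in __hash_page_huge() further down) derives the primary HPTE group from the hash value and the secondary group from its complement. A small standalone sketch of that index arithmetic, using an illustrative htab_hash_mask rather than the live value:

#include <stdio.h>

#define HPTES_PER_GROUP 8UL

static unsigned long primary_group(unsigned long hash, unsigned long htab_hash_mask)
{
        /* the trailing mask is redundant here but mirrors the source */
        return ((hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
}

static unsigned long secondary_group(unsigned long hash, unsigned long htab_hash_mask)
{
        /* the secondary hash is simply the bitwise complement of the primary */
        return ((~hash & htab_hash_mask) * HPTES_PER_GROUP) & ~0x7UL;
}

int main(void)
{
        unsigned long mask = 0xfffff;   /* example htab_hash_mask */
        unsigned long hash = 0x12345;

        printf("primary group   %#lx\n", primary_group(hash, mask));
        printf("secondary group %#lx\n", secondary_group(hash, mask));
        return 0;
}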
diff --git a/trunk/arch/powerpc/mm/hugetlbpage-hash64.c b/trunk/arch/powerpc/mm/hugetlbpage-hash64.c
index b913f416d97a..cecad348f604 100644
--- a/trunk/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/trunk/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -14,10 +14,6 @@
#include
#include
-extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
- unsigned long pa, unsigned long rlags,
- unsigned long vflags, int psize, int ssize);
-
int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
pte_t *ptep, unsigned long trap, int local, int ssize,
unsigned int shift, unsigned int mmu_psize)
@@ -87,9 +83,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
if (likely(!(old_pte & _PAGE_HASHPTE))) {
unsigned long hash = hpt_hash(vpn, shift, ssize);
+ unsigned long hpte_group;
pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;
+repeat:
+ hpte_group = ((hash & htab_hash_mask) *
+ HPTES_PER_GROUP) & ~0x7UL;
+
/* clear HPTE slot information in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
@@ -100,8 +101,26 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
_PAGE_COHERENT | _PAGE_GUARDED));
- slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
- mmu_psize, ssize);
+ /* Insert into the hash table, primary slot */
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
+ mmu_psize, ssize);
+
+ /* Primary is full, try the secondary */
+ if (unlikely(slot == -1)) {
+ hpte_group = ((~hash & htab_hash_mask) *
+ HPTES_PER_GROUP) & ~0x7UL;
+ slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
+ HPTE_V_SECONDARY,
+ mmu_psize, ssize);
+ if (slot == -1) {
+ if (mftb() & 0x1)
+ hpte_group = ((hash & htab_hash_mask) *
+ HPTES_PER_GROUP)&~0x7UL;
+
+ ppc_md.hpte_remove(hpte_group);
+ goto repeat;
+ }
+ }
/*
* Hypervisor failure. Restore old pte and return -1
diff --git a/trunk/arch/powerpc/mm/hugetlbpage.c b/trunk/arch/powerpc/mm/hugetlbpage.c
index 5dc52d803ed8..1a6de0a7d8eb 100644
--- a/trunk/arch/powerpc/mm/hugetlbpage.c
+++ b/trunk/arch/powerpc/mm/hugetlbpage.c
@@ -742,7 +742,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
struct hstate *hstate = hstate_file(file);
int mmu_psize = shift_to_mmu_psize(huge_page_shift(hstate));
- return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1);
+ return slice_get_unmapped_area(addr, len, flags, mmu_psize, 1, 0);
}
#endif
diff --git a/trunk/arch/powerpc/mm/icswx.c b/trunk/arch/powerpc/mm/icswx.c
index 915412e4d5ba..8cdbd8634a58 100644
--- a/trunk/arch/powerpc/mm/icswx.c
+++ b/trunk/arch/powerpc/mm/icswx.c
@@ -67,7 +67,7 @@
void switch_cop(struct mm_struct *next)
{
-#ifdef CONFIG_PPC_ICSWX_PID
+#ifdef CONFIG_ICSWX_PID
mtspr(SPRN_PID, next->context.cop_pid);
#endif
mtspr(SPRN_ACOP, next->context.acop);
diff --git a/trunk/arch/powerpc/mm/mem.c b/trunk/arch/powerpc/mm/mem.c
index 056732e8ca38..f1f7409a4183 100644
--- a/trunk/arch/powerpc/mm/mem.c
+++ b/trunk/arch/powerpc/mm/mem.c
@@ -66,9 +66,10 @@ unsigned long long memory_limit;
#ifdef CONFIG_HIGHMEM
pte_t *kmap_pte;
-EXPORT_SYMBOL(kmap_pte);
pgprot_t kmap_prot;
+
EXPORT_SYMBOL(kmap_prot);
+EXPORT_SYMBOL(kmap_pte);
static inline pte_t *virt_to_kpte(unsigned long vaddr)
{
diff --git a/trunk/arch/powerpc/mm/numa.c b/trunk/arch/powerpc/mm/numa.c
index 2d13f90476bd..bba87ca2b4d7 100644
--- a/trunk/arch/powerpc/mm/numa.c
+++ b/trunk/arch/powerpc/mm/numa.c
@@ -22,10 +22,6 @@
#include
#include
#include
-#include
-#include
-#include
-#include
#include
#include
#include
@@ -33,7 +29,6 @@
#include
#include
#include
-#include
static int numa_enabled = 1;
@@ -84,7 +79,7 @@ static void __init setup_node_to_cpumask_map(void)
dbg("Node to cpumask map for %d nodes\n", nr_node_ids);
}
-static int __init fake_numa_create_new_node(unsigned long end_pfn,
+static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
unsigned int *nid)
{
unsigned long long mem;
@@ -206,7 +201,7 @@ int __node_distance(int a, int b)
int distance = LOCAL_DISTANCE;
if (!form1_affinity)
- return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE);
+ return distance;
for (i = 0; i < distance_ref_points_depth; i++) {
if (distance_lookup_table[a][i] == distance_lookup_table[b][i])
@@ -296,7 +291,9 @@ EXPORT_SYMBOL_GPL(of_node_to_nid);
static int __init find_min_common_depth(void)
{
int depth;
+ struct device_node *chosen;
struct device_node *root;
+ const char *vec5;
if (firmware_has_feature(FW_FEATURE_OPAL))
root = of_find_node_by_path("/ibm,opal");
@@ -328,10 +325,24 @@ static int __init find_min_common_depth(void)
distance_ref_points_depth /= sizeof(int);
- if (firmware_has_feature(FW_FEATURE_OPAL) ||
- firmware_has_feature(FW_FEATURE_TYPE1_AFFINITY)) {
- dbg("Using form 1 affinity\n");
+#define VEC5_AFFINITY_BYTE 5
+#define VEC5_AFFINITY 0x80
+
+ if (firmware_has_feature(FW_FEATURE_OPAL))
form1_affinity = 1;
+ else {
+ chosen = of_find_node_by_path("/chosen");
+ if (chosen) {
+ vec5 = of_get_property(chosen,
+ "ibm,architecture-vec-5", NULL);
+ if (vec5 && (vec5[VEC5_AFFINITY_BYTE] &
+ VEC5_AFFINITY)) {
+ dbg("Using form 1 affinity\n");
+ form1_affinity = 1;
+ }
+
+ of_node_put(chosen);
+ }
}
if (form1_affinity) {
@@ -1259,18 +1270,10 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
-struct topology_update_data {
- struct topology_update_data *next;
- unsigned int cpu;
- int old_nid;
- int new_nid;
-};
-
static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS];
static cpumask_t cpu_associativity_changes_mask;
static int vphn_enabled;
-static int prrn_enabled;
-static void reset_topology_timer(void);
+static void set_topology_timer(void);
/*
* Store the current values of the associativity change counters in the
@@ -1306,9 +1309,11 @@ static void setup_cpu_associativity_change_counters(void)
*/
static int update_cpu_associativity_changes_mask(void)
{
- int cpu;
+ int cpu, nr_cpus = 0;
cpumask_t *changes = &cpu_associativity_changes_mask;
+ cpumask_clear(changes);
+
for_each_possible_cpu(cpu) {
int i, changed = 0;
u8 *counts = vphn_cpu_change_counts[cpu];
@@ -1322,10 +1327,11 @@ static int update_cpu_associativity_changes_mask(void)
}
if (changed) {
cpumask_set_cpu(cpu, changes);
+ nr_cpus++;
}
}
- return cpumask_weight(changes);
+ return nr_cpus;
}
/*
@@ -1416,85 +1422,41 @@ static long vphn_get_associativity(unsigned long cpu,
return rc;
}
-/*
- * Update the CPU maps and sysfs entries for a single CPU when its NUMA
- * characteristics change. This function doesn't perform any locking and is
- * only safe to call from stop_machine().
- */
-static int update_cpu_topology(void *data)
-{
- struct topology_update_data *update;
- unsigned long cpu;
-
- if (!data)
- return -EINVAL;
-
- cpu = get_cpu();
-
- for (update = data; update; update = update->next) {
- if (cpu != update->cpu)
- continue;
-
- unregister_cpu_under_node(update->cpu, update->old_nid);
- unmap_cpu_from_node(update->cpu);
- map_cpu_to_node(update->cpu, update->new_nid);
- vdso_getcpu_init();
- register_cpu_under_node(update->cpu, update->new_nid);
- }
-
- return 0;
-}
-
/*
* Update the node maps and sysfs entries for each cpu whose home node
* has changed. Returns 1 when the topology has changed, and 0 otherwise.
*/
int arch_update_cpu_topology(void)
{
- unsigned int cpu, changed = 0;
- struct topology_update_data *updates, *ud;
+ int cpu, nid, old_nid, changed = 0;
unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0};
- cpumask_t updated_cpus;
struct device *dev;
- int weight, i = 0;
-
- weight = cpumask_weight(&cpu_associativity_changes_mask);
- if (!weight)
- return 0;
-
- updates = kzalloc(weight * (sizeof(*updates)), GFP_KERNEL);
- if (!updates)
- return 0;
- cpumask_clear(&updated_cpus);
-
- for_each_cpu(cpu, &cpu_associativity_changes_mask) {
- ud = &updates[i++];
- ud->cpu = cpu;
+ for_each_cpu(cpu,&cpu_associativity_changes_mask) {
vphn_get_associativity(cpu, associativity);
- ud->new_nid = associativity_to_nid(associativity);
-
- if (ud->new_nid < 0 || !node_online(ud->new_nid))
- ud->new_nid = first_online_node;
+ nid = associativity_to_nid(associativity);
- ud->old_nid = numa_cpu_lookup_table[cpu];
- cpumask_set_cpu(cpu, &updated_cpus);
+ if (nid < 0 || !node_online(nid))
+ nid = first_online_node;
- if (i < weight)
- ud->next = &updates[i];
- }
+ old_nid = numa_cpu_lookup_table[cpu];
- stop_machine(update_cpu_topology, &updates[0], &updated_cpus);
-
- for (ud = &updates[0]; ud; ud = ud->next) {
- dev = get_cpu_device(ud->cpu);
+ /* Disable hotplug while we update the cpu
+ * masks and sysfs.
+ */
+ get_online_cpus();
+ unregister_cpu_under_node(cpu, old_nid);
+ unmap_cpu_from_node(cpu);
+ map_cpu_to_node(cpu, nid);
+ register_cpu_under_node(cpu, nid);
+ put_online_cpus();
+
+ dev = get_cpu_device(cpu);
if (dev)
kobject_uevent(&dev->kobj, KOBJ_CHANGE);
- cpumask_clear_cpu(ud->cpu, &cpu_associativity_changes_mask);
changed = 1;
}
- kfree(updates);
return changed;
}
@@ -1511,157 +1473,49 @@ void topology_schedule_update(void)
static void topology_timer_fn(unsigned long ignored)
{
- if (prrn_enabled && cpumask_weight(&cpu_associativity_changes_mask))
+ if (!vphn_enabled)
+ return;
+ if (update_cpu_associativity_changes_mask() > 0)
topology_schedule_update();
- else if (vphn_enabled) {
- if (update_cpu_associativity_changes_mask() > 0)
- topology_schedule_update();
- reset_topology_timer();
- }
+ set_topology_timer();
}
static struct timer_list topology_timer =
TIMER_INITIALIZER(topology_timer_fn, 0, 0);
-static void reset_topology_timer(void)
+static void set_topology_timer(void)
{
topology_timer.data = 0;
topology_timer.expires = jiffies + 60 * HZ;
- mod_timer(&topology_timer, topology_timer.expires);
-}
-
-static void stage_topology_update(int core_id)
-{
- cpumask_or(&cpu_associativity_changes_mask,
- &cpu_associativity_changes_mask, cpu_sibling_mask(core_id));
- reset_topology_timer();
-}
-
-static int dt_update_callback(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct of_prop_reconfig *update;
- int rc = NOTIFY_DONE;
-
- switch (action) {
- case OF_RECONFIG_UPDATE_PROPERTY:
- update = (struct of_prop_reconfig *)data;
- if (!of_prop_cmp(update->dn->type, "cpu") &&
- !of_prop_cmp(update->prop->name, "ibm,associativity")) {
- u32 core_id;
- of_property_read_u32(update->dn, "reg", &core_id);
- stage_topology_update(core_id);
- rc = NOTIFY_OK;
- }
- break;
- }
-
- return rc;
+ add_timer(&topology_timer);
}
-static struct notifier_block dt_update_nb = {
- .notifier_call = dt_update_callback,
-};
-
/*
- * Start polling for associativity changes.
+ * Start polling for VPHN associativity changes.
*/
int start_topology_update(void)
{
int rc = 0;
- if (firmware_has_feature(FW_FEATURE_PRRN)) {
- if (!prrn_enabled) {
- prrn_enabled = 1;
- vphn_enabled = 0;
- rc = of_reconfig_notifier_register(&dt_update_nb);
- }
- } else if (firmware_has_feature(FW_FEATURE_VPHN) &&
- get_lppaca()->shared_proc) {
- if (!vphn_enabled) {
- prrn_enabled = 0;
- vphn_enabled = 1;
- setup_cpu_associativity_change_counters();
- init_timer_deferrable(&topology_timer);
- reset_topology_timer();
- }
+ /* Disabled until races with load balancing are fixed */
+ if (0 && firmware_has_feature(FW_FEATURE_VPHN) &&
+ get_lppaca()->shared_proc) {
+ vphn_enabled = 1;
+ setup_cpu_associativity_change_counters();
+ init_timer_deferrable(&topology_timer);
+ set_topology_timer();
+ rc = 1;
}
return rc;
}
+__initcall(start_topology_update);
/*
* Disable polling for VPHN associativity changes.
*/
int stop_topology_update(void)
{
- int rc = 0;
-
- if (prrn_enabled) {
- prrn_enabled = 0;
- rc = of_reconfig_notifier_unregister(&dt_update_nb);
- } else if (vphn_enabled) {
- vphn_enabled = 0;
- rc = del_timer_sync(&topology_timer);
- }
-
- return rc;
-}
-
-int prrn_is_enabled(void)
-{
- return prrn_enabled;
-}
-
-static int topology_read(struct seq_file *file, void *v)
-{
- if (vphn_enabled || prrn_enabled)
- seq_puts(file, "on\n");
- else
- seq_puts(file, "off\n");
-
- return 0;
-}
-
-static int topology_open(struct inode *inode, struct file *file)
-{
- return single_open(file, topology_read, NULL);
-}
-
-static ssize_t topology_write(struct file *file, const char __user *buf,
- size_t count, loff_t *off)
-{
- char kbuf[4]; /* "on" or "off" plus null. */
- int read_len;
-
- read_len = count < 3 ? count : 3;
- if (copy_from_user(kbuf, buf, read_len))
- return -EINVAL;
-
- kbuf[read_len] = '\0';
-
- if (!strncmp(kbuf, "on", 2))
- start_topology_update();
- else if (!strncmp(kbuf, "off", 3))
- stop_topology_update();
- else
- return -EINVAL;
-
- return count;
-}
-
-static const struct file_operations topology_ops = {
- .read = seq_read,
- .write = topology_write,
- .open = topology_open,
- .release = single_release
-};
-
-static int topology_update_init(void)
-{
- start_topology_update();
- proc_create("powerpc/topology_updates", 644, NULL, &topology_ops);
-
- return 0;
+ vphn_enabled = 0;
+ return del_timer_sync(&topology_timer);
}
-device_initcall(topology_update_init);
#endif /* CONFIG_PPC_SPLPAR */
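The VPHN code above keeps a saved copy of the per-CPU associativity change counters and, from the topology timer, compares it with the values the hypervisor updates; every CPU whose counters moved is flagged and a topology update is scheduled. A generic userspace sketch of that polling scheme (the array sizes, types and names are illustrative, not the kernel's):

#include <stdbool.h>

#define NR_CPUS   4
#define NR_LEVELS 4     /* stands in for MAX_DISTANCE_REF_POINTS */

static unsigned char saved[NR_CPUS][NR_LEVELS];

/* Returns the number of CPUs whose associativity counters changed and
 * records them in changed_mask; the caller would reschedule the timer
 * and, when the count is non-zero, trigger arch_update_cpu_topology(). */
static int scan_for_changes(const unsigned char now[NR_CPUS][NR_LEVELS],
                            bool changed_mask[NR_CPUS])
{
        int cpu, level, nr = 0;

        for (cpu = 0; cpu < NR_CPUS; cpu++) {
                changed_mask[cpu] = false;
                for (level = 0; level < NR_LEVELS; level++) {
                        if (saved[cpu][level] != now[cpu][level]) {
                                saved[cpu][level] = now[cpu][level];
                                changed_mask[cpu] = true;
                        }
                }
                if (changed_mask[cpu])
                        nr++;
        }
        return nr;
}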
diff --git a/trunk/arch/powerpc/mm/slice.c b/trunk/arch/powerpc/mm/slice.c
index 3e99c149271a..cf9dada734b6 100644
--- a/trunk/arch/powerpc/mm/slice.c
+++ b/trunk/arch/powerpc/mm/slice.c
@@ -237,112 +237,134 @@ static void slice_convert(struct mm_struct *mm, struct slice_mask mask, int psiz
#endif
}
-/*
- * Compute which slice addr is part of;
- * set *boundary_addr to the start or end boundary of that slice
- * (depending on 'end' parameter);
- * return boolean indicating if the slice is marked as available in the
- * 'available' slice_mark.
- */
-static bool slice_scan_available(unsigned long addr,
- struct slice_mask available,
- int end,
- unsigned long *boundary_addr)
-{
- unsigned long slice;
- if (addr < SLICE_LOW_TOP) {
- slice = GET_LOW_SLICE_INDEX(addr);
- *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
- return !!(available.low_slices & (1u << slice));
- } else {
- slice = GET_HIGH_SLICE_INDEX(addr);
- *boundary_addr = (slice + end) ?
- ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
- return !!(available.high_slices & (1u << slice));
- }
-}
-
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
- int psize)
+ int psize, int use_cache)
{
+ struct vm_area_struct *vma;
+ unsigned long start_addr, addr;
+ struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
- unsigned long addr, found, next_end;
- struct vm_unmapped_area_info info;
-
- info.flags = 0;
- info.length = len;
- info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
- info.align_offset = 0;
-
- addr = TASK_UNMAPPED_BASE;
- while (addr < TASK_SIZE) {
- info.low_limit = addr;
- if (!slice_scan_available(addr, available, 1, &addr))
- continue;
- next_slice:
- /*
- * At this point [info.low_limit; addr) covers
- * available slices only and ends at a slice boundary.
- * Check if we need to reduce the range, or if we can
- * extend it to cover the next available slice.
- */
- if (addr >= TASK_SIZE)
- addr = TASK_SIZE;
- else if (slice_scan_available(addr, available, 1, &next_end)) {
- addr = next_end;
- goto next_slice;
- }
- info.high_limit = addr;
+ if (use_cache) {
+ if (len <= mm->cached_hole_size) {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ } else
+ start_addr = addr = mm->free_area_cache;
+ } else
+ start_addr = addr = TASK_UNMAPPED_BASE;
+
+full_search:
+ for (;;) {
+ addr = _ALIGN_UP(addr, 1ul << pshift);
+ if ((TASK_SIZE - len) < addr)
+ break;
+ vma = find_vma(mm, addr);
+ BUG_ON(vma && (addr >= vma->vm_end));
- found = vm_unmapped_area(&info);
- if (!(found & ~PAGE_MASK))
- return found;
+ mask = slice_range_to_mask(addr, len);
+ if (!slice_check_fit(mask, available)) {
+ if (addr < SLICE_LOW_TOP)
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_LOW_SHIFT);
+ else
+ addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
+ continue;
+ }
+ if (!vma || addr + len <= vma->vm_start) {
+ /*
+ * Remember the place where we stopped the search:
+ */
+ if (use_cache)
+ mm->free_area_cache = addr + len;
+ return addr;
+ }
+ if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+ addr = vma->vm_end;
}
+ /* Make sure we didn't miss any holes */
+ if (use_cache && start_addr != TASK_UNMAPPED_BASE) {
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ mm->cached_hole_size = 0;
+ goto full_search;
+ }
return -ENOMEM;
}
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
unsigned long len,
struct slice_mask available,
- int psize)
+ int psize, int use_cache)
{
+ struct vm_area_struct *vma;
+ unsigned long addr;
+ struct slice_mask mask;
int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
- unsigned long addr, found, prev;
- struct vm_unmapped_area_info info;
- info.flags = VM_UNMAPPED_AREA_TOPDOWN;
- info.length = len;
- info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
- info.align_offset = 0;
+ /* check if free_area_cache is useful for us */
+ if (use_cache) {
+ if (len <= mm->cached_hole_size) {
+ mm->cached_hole_size = 0;
+ mm->free_area_cache = mm->mmap_base;
+ }
+
+ /* either no address requested or can't fit in requested
+ * address hole
+ */
+ addr = mm->free_area_cache;
+
+ /* make sure it can fit in the remaining address space */
+ if (addr > len) {
+ addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
+ mask = slice_range_to_mask(addr, len);
+ if (slice_check_fit(mask, available) &&
+ slice_area_is_free(mm, addr, len))
+ /* remember the address as a hint for
+ * next time
+ */
+ return (mm->free_area_cache = addr);
+ }
+ }
addr = mm->mmap_base;
- while (addr > PAGE_SIZE) {
- info.high_limit = addr;
- if (!slice_scan_available(addr - 1, available, 0, &addr))
+ while (addr > len) {
+ /* Go down by chunk size */
+ addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
+
+ /* Check for hit with different page size */
+ mask = slice_range_to_mask(addr, len);
+ if (!slice_check_fit(mask, available)) {
+ if (addr < SLICE_LOW_TOP)
+ addr = _ALIGN_DOWN(addr, 1ul << SLICE_LOW_SHIFT);
+ else if (addr < (1ul << SLICE_HIGH_SHIFT))
+ addr = SLICE_LOW_TOP;
+ else
+ addr = _ALIGN_DOWN(addr, 1ul << SLICE_HIGH_SHIFT);
continue;
+ }
- prev_slice:
/*
- * At this point [addr; info.high_limit) covers
- * available slices only and starts at a slice boundary.
- * Check if we need to reduce the range, or if we can
- * extend it to cover the previous available slice.
+ * Lookup failure means no vma is above this address,
+ * else if new region fits below vma->vm_start,
+ * return with success:
*/
- if (addr < PAGE_SIZE)
- addr = PAGE_SIZE;
- else if (slice_scan_available(addr - 1, available, 0, &prev)) {
- addr = prev;
- goto prev_slice;
+ vma = find_vma(mm, addr);
+ if (!vma || (addr + len) <= vma->vm_start) {
+ /* remember the address as a hint for next time */
+ if (use_cache)
+ mm->free_area_cache = addr;
+ return addr;
}
- info.low_limit = addr;
- found = vm_unmapped_area(&info);
- if (!(found & ~PAGE_MASK))
- return found;
+ /* remember the largest hole we saw so far */
+ if (use_cache && (addr + mm->cached_hole_size) < vma->vm_start)
+ mm->cached_hole_size = vma->vm_start - addr;
+
+ /* try just below the current vma->vm_start */
+ addr = vma->vm_start;
}
/*
@@ -351,18 +373,28 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
* can happen with large stack limits and large mmap()
* allocations.
*/
- return slice_find_area_bottomup(mm, len, available, psize);
+ addr = slice_find_area_bottomup(mm, len, available, psize, 0);
+
+ /*
+ * Restore the topdown base:
+ */
+ if (use_cache) {
+ mm->free_area_cache = mm->mmap_base;
+ mm->cached_hole_size = ~0UL;
+ }
+
+ return addr;
}
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
struct slice_mask mask, int psize,
- int topdown)
+ int topdown, int use_cache)
{
if (topdown)
- return slice_find_area_topdown(mm, len, mask, psize);
+ return slice_find_area_topdown(mm, len, mask, psize, use_cache);
else
- return slice_find_area_bottomup(mm, len, mask, psize);
+ return slice_find_area_bottomup(mm, len, mask, psize, use_cache);
}
#define or_mask(dst, src) do { \
@@ -383,7 +415,7 @@ static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
unsigned long flags, unsigned int psize,
- int topdown)
+ int topdown, int use_cache)
{
struct slice_mask mask = {0, 0};
struct slice_mask good_mask;
@@ -398,8 +430,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
BUG_ON(mm->task_size == 0);
slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
- slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
- addr, len, flags, topdown);
+ slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d, use_cache=%d\n",
+ addr, len, flags, topdown, use_cache);
if (len > mm->task_size)
return -ENOMEM;
@@ -471,7 +503,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Now let's see if we can find something in the existing
* slices for that size
*/
- newaddr = slice_find_area(mm, len, good_mask, psize, topdown);
+ newaddr = slice_find_area(mm, len, good_mask, psize, topdown,
+ use_cache);
if (newaddr != -ENOMEM) {
/* Found within the good mask, we don't have to setup,
* we thus return directly
@@ -503,7 +536,8 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
* anywhere in the good area.
*/
if (addr) {
- addr = slice_find_area(mm, len, good_mask, psize, topdown);
+ addr = slice_find_area(mm, len, good_mask, psize, topdown,
+ use_cache);
if (addr != -ENOMEM) {
slice_dbg(" found area at 0x%lx\n", addr);
return addr;
@@ -513,14 +547,15 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
/* Now let's see if we can find something in the existing slices
* for that size plus free slices
*/
- addr = slice_find_area(mm, len, potential_mask, psize, topdown);
+ addr = slice_find_area(mm, len, potential_mask, psize, topdown,
+ use_cache);
#ifdef CONFIG_PPC_64K_PAGES
if (addr == -ENOMEM && psize == MMU_PAGE_64K) {
/* retry the search with 4k-page slices included */
or_mask(potential_mask, compat_mask);
addr = slice_find_area(mm, len, potential_mask, psize,
- topdown);
+ topdown, use_cache);
}
#endif
@@ -551,7 +586,8 @@ unsigned long arch_get_unmapped_area(struct file *filp,
unsigned long flags)
{
return slice_get_unmapped_area(addr, len, flags,
- current->mm->context.user_psize, 0);
+ current->mm->context.user_psize,
+ 0, 1);
}
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
@@ -561,7 +597,8 @@ unsigned long arch_get_unmapped_area_topdown(struct file *filp,
const unsigned long flags)
{
return slice_get_unmapped_area(addr0, len, flags,
- current->mm->context.user_psize, 1);
+ current->mm->context.user_psize,
+ 1, 1);
}
unsigned int get_slice_psize(struct mm_struct *mm, unsigned long addr)
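Both slice search loops above repeatedly round candidate addresses to a 1ul << pshift boundary with _ALIGN_UP()/_ALIGN_DOWN(). A minimal sketch of those power-of-two alignment helpers (simplified local definitions, not the kernel's):

#include <assert.h>

#define ALIGN_DOWN(x, a)        ((x) & ~((a) - 1))
#define ALIGN_UP(x, a)          ALIGN_DOWN((x) + (a) - 1, (a))

int main(void)
{
        unsigned long pshift = 24;              /* e.g. a 16MB page shift */
        unsigned long align = 1UL << pshift;

        assert(ALIGN_DOWN(0x1234567UL, align) == 0x1000000UL);
        assert(ALIGN_UP(0x1234567UL, align)   == 0x2000000UL);
        return 0;
}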
diff --git a/trunk/arch/powerpc/perf/Makefile b/trunk/arch/powerpc/perf/Makefile
index 510fae10513d..af3fac23768c 100644
--- a/trunk/arch/powerpc/perf/Makefile
+++ b/trunk/arch/powerpc/perf/Makefile
@@ -2,10 +2,9 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
obj-$(CONFIG_PERF_EVENTS) += callchain.o
-obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o bhrb.o
+obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
- power5+-pmu.o power6-pmu.o power7-pmu.o \
- power8-pmu.o
+ power5+-pmu.o power6-pmu.o power7-pmu.o
obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
diff --git a/trunk/arch/powerpc/perf/bhrb.S b/trunk/arch/powerpc/perf/bhrb.S
deleted file mode 100644
index d85f9a58ddbc..000000000000
--- a/trunk/arch/powerpc/perf/bhrb.S
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Basic assembly code to read BHRB entries
- *
- * Copyright 2013 Anshuman Khandual, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include
-#include
-
- .text
-
-.balign 8
-
-/* r3 = n (where n = [0-31])
- * The maximum number of BHRB entries supported with PPC_MFBHRBE instruction
- * is 1024. We have limited number of table entries here as POWER8 implements
- * 32 BHRB entries.
- */
-
-/* .global read_bhrb */
-_GLOBAL(read_bhrb)
- cmpldi r3,31
- bgt 1f
- ld r4,bhrb_table@got(r2)
- sldi r3,r3,3
- add r3,r4,r3
- mtctr r3
- bctr
-1: li r3,0
- blr
-
-#define MFBHRB_TABLE1(n) PPC_MFBHRBE(R3,n); blr
-#define MFBHRB_TABLE2(n) MFBHRB_TABLE1(n); MFBHRB_TABLE1(n+1)
-#define MFBHRB_TABLE4(n) MFBHRB_TABLE2(n); MFBHRB_TABLE2(n+2)
-#define MFBHRB_TABLE8(n) MFBHRB_TABLE4(n); MFBHRB_TABLE4(n+4)
-#define MFBHRB_TABLE16(n) MFBHRB_TABLE8(n); MFBHRB_TABLE8(n+8)
-#define MFBHRB_TABLE32(n) MFBHRB_TABLE16(n); MFBHRB_TABLE16(n+16)
-
-bhrb_table:
- MFBHRB_TABLE32(0)
diff --git a/trunk/arch/powerpc/perf/core-book3s.c b/trunk/arch/powerpc/perf/core-book3s.c
index c627843c5b2e..65362e98eb26 100644
--- a/trunk/arch/powerpc/perf/core-book3s.c
+++ b/trunk/arch/powerpc/perf/core-book3s.c
@@ -19,11 +19,6 @@
#include
#include
-#define BHRB_MAX_ENTRIES 32
-#define BHRB_TARGET 0x0000000000000002
-#define BHRB_PREDICTION 0x0000000000000001
-#define BHRB_EA 0xFFFFFFFFFFFFFFFC
-
struct cpu_hw_events {
int n_events;
int n_percpu;
@@ -43,15 +38,7 @@ struct cpu_hw_events {
unsigned int group_flag;
int n_txn_start;
-
- /* BHRB bits */
- u64 bhrb_filter; /* BHRB HW branch filter */
- int bhrb_users;
- void *bhrb_context;
- struct perf_branch_stack bhrb_stack;
- struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
};
-
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct power_pmu *ppmu;
@@ -102,11 +89,6 @@ static inline int siar_valid(struct pt_regs *regs)
#endif /* CONFIG_PPC32 */
-static bool regs_use_siar(struct pt_regs *regs)
-{
- return !!(regs->result & 1);
-}
-
/*
* Things that are specific to 64-bit implementations.
*/
@@ -116,12 +98,11 @@ static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
unsigned long mmcra = regs->dsisr;
- if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
+ if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
if (slot > 1)
return 4 * (slot - 1);
}
-
return 0;
}
@@ -149,35 +130,24 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
*addrp = mfspr(SPRN_SDAR);
}
-static bool regs_sihv(struct pt_regs *regs)
+static bool mmcra_sihv(unsigned long mmcra)
{
unsigned long sihv = MMCRA_SIHV;
- if (ppmu->flags & PPMU_HAS_SIER)
- return !!(regs->dar & SIER_SIHV);
-
if (ppmu->flags & PPMU_ALT_SIPR)
sihv = POWER6_MMCRA_SIHV;
- return !!(regs->dsisr & sihv);
+ return !!(mmcra & sihv);
}
-static bool regs_sipr(struct pt_regs *regs)
+static bool mmcra_sipr(unsigned long mmcra)
{
unsigned long sipr = MMCRA_SIPR;
- if (ppmu->flags & PPMU_HAS_SIER)
- return !!(regs->dar & SIER_SIPR);
-
if (ppmu->flags & PPMU_ALT_SIPR)
sipr = POWER6_MMCRA_SIPR;
- return !!(regs->dsisr & sipr);
-}
-
-static bool regs_no_sipr(struct pt_regs *regs)
-{
- return !!(regs->result & 2);
+ return !!(mmcra & sipr);
}
static inline u32 perf_flags_from_msr(struct pt_regs *regs)
@@ -191,7 +161,8 @@ static inline u32 perf_flags_from_msr(struct pt_regs *regs)
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
- bool use_siar = regs_use_siar(regs);
+ unsigned long mmcra = regs->dsisr;
+ unsigned long use_siar = regs->result;
if (!use_siar)
return perf_flags_from_msr(regs);
@@ -202,7 +173,7 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
* SIAR which should give slightly more reliable
* results
*/
- if (regs_no_sipr(regs)) {
+ if (ppmu->flags & PPMU_NO_SIPR) {
unsigned long siar = mfspr(SPRN_SIAR);
if (siar >= PAGE_OFFSET)
return PERF_RECORD_MISC_KERNEL;
@@ -210,19 +181,16 @@ static inline u32 perf_get_misc_flags(struct pt_regs *regs)
}
/* PR has priority over HV, so order below is important */
- if (regs_sipr(regs))
+ if (mmcra_sipr(mmcra))
return PERF_RECORD_MISC_USER;
-
- if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
+ if (mmcra_sihv(mmcra) && (freeze_events_kernel != MMCR0_FCHV))
return PERF_RECORD_MISC_HYPERVISOR;
-
return PERF_RECORD_MISC_KERNEL;
}
/*
* Overload regs->dsisr to store MMCRA so we only need to read it once
* on each interrupt.
- * Overload regs->dar to store SIER if we have it.
* Overload regs->result to specify whether we should use the MSR (result
* is zero) or the SIAR (result is non zero).
*/
@@ -232,24 +200,6 @@ static inline void perf_read_regs(struct pt_regs *regs)
int marked = mmcra & MMCRA_SAMPLE_ENABLE;
int use_siar;
- regs->dsisr = mmcra;
- regs->result = 0;
-
- if (ppmu->flags & PPMU_NO_SIPR)
- regs->result |= 2;
-
- /*
- * On power8 if we're in random sampling mode, the SIER is updated.
- * If we're in continuous sampling mode, we don't have SIPR.
- */
- if (ppmu->flags & PPMU_HAS_SIER) {
- if (marked)
- regs->dar = mfspr(SPRN_SIER);
- else
- regs->result |= 2;
- }
-
-
/*
* If this isn't a PMU exception (eg a software event) the SIAR is
* not valid. Use pt_regs.
@@ -273,12 +223,13 @@ static inline void perf_read_regs(struct pt_regs *regs)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
use_siar = 0;
- else if (!regs_no_sipr(regs) && regs_sipr(regs))
+ else if (!(ppmu->flags & PPMU_NO_SIPR) && mmcra_sipr(mmcra))
use_siar = 0;
else
use_siar = 1;
- regs->result |= use_siar;
+ regs->dsisr = mmcra;
+ regs->result = use_siar;
}
/*
@@ -871,9 +822,6 @@ static void power_pmu_enable(struct pmu *pmu)
}
out:
- if (cpuhw->bhrb_users)
- ppmu->config_bhrb(cpuhw->bhrb_filter);
-
local_irq_restore(flags);
}
@@ -904,47 +852,6 @@ static int collect_events(struct perf_event *group, int max_count,
return n;
}
-/* Reset all possible BHRB entries */
-static void power_pmu_bhrb_reset(void)
-{
- asm volatile(PPC_CLRBHRB);
-}
-
-void power_pmu_bhrb_enable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
- if (!ppmu->bhrb_nr)
- return;
-
- /* Clear BHRB if we changed task context to avoid data leaks */
- if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
- power_pmu_bhrb_reset();
- cpuhw->bhrb_context = event->ctx;
- }
- cpuhw->bhrb_users++;
-}
-
-void power_pmu_bhrb_disable(struct perf_event *event)
-{
- struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
-
- if (!ppmu->bhrb_nr)
- return;
-
- cpuhw->bhrb_users--;
- WARN_ON_ONCE(cpuhw->bhrb_users < 0);
-
- if (!cpuhw->disabled && !cpuhw->bhrb_users) {
- /* BHRB cannot be turned off when other
- * events are active on the PMU.
- */
-
- /* avoid stale pointer */
- cpuhw->bhrb_context = NULL;
- }
-}
-
/*
* Add a event to the PMU.
* If all events are not already frozen, then we disable and
@@ -1004,9 +911,6 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
ret = 0;
out:
- if (has_branch_stack(event))
- power_pmu_bhrb_enable(event);
-
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
return ret;
@@ -1059,9 +963,6 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
}
- if (has_branch_stack(event))
- power_pmu_bhrb_disable(event);
-
perf_pmu_enable(event->pmu);
local_irq_restore(flags);
}
@@ -1180,15 +1081,6 @@ int power_pmu_commit_txn(struct pmu *pmu)
return 0;
}
-/* Called from ctxsw to prevent one process's branch entries to
- * mingle with the other process's entries during context switch.
- */
-void power_pmu_flush_branch_stack(void)
-{
- if (ppmu->bhrb_nr)
- power_pmu_bhrb_reset();
-}
-
/*
* Return 1 if we might be able to put event on a limited PMC,
* or 0 if not.
@@ -1303,11 +1195,9 @@ static int power_pmu_event_init(struct perf_event *event)
if (!ppmu)
return -ENOENT;
- if (has_branch_stack(event)) {
- /* PMU has BHRB enabled */
- if (!(ppmu->flags & PPMU_BHRB))
- return -EOPNOTSUPP;
- }
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
@@ -1388,15 +1278,6 @@ static int power_pmu_event_init(struct perf_event *event)
cpuhw = &get_cpu_var(cpu_hw_events);
err = power_check_constraints(cpuhw, events, cflags, n + 1);
-
- if (has_branch_stack(event)) {
- cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
- event->attr.branch_sample_type);
-
- if(cpuhw->bhrb_filter == -1)
- return -EOPNOTSUPP;
- }
-
put_cpu_var(cpu_hw_events);
if (err)
return -EINVAL;
@@ -1455,79 +1336,8 @@ struct pmu power_pmu = {
.cancel_txn = power_pmu_cancel_txn,
.commit_txn = power_pmu_commit_txn,
.event_idx = power_pmu_event_idx,
- .flush_branch_stack = power_pmu_flush_branch_stack,
};
-/* Processing BHRB entries */
-void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
-{
- u64 val;
- u64 addr;
- int r_index, u_index, target, pred;
-
- r_index = 0;
- u_index = 0;
- while (r_index < ppmu->bhrb_nr) {
- /* Assembly read function */
- val = read_bhrb(r_index);
-
- /* Terminal marker: End of valid BHRB entries */
- if (val == 0) {
- break;
- } else {
- /* BHRB field break up */
- addr = val & BHRB_EA;
- pred = val & BHRB_PREDICTION;
- target = val & BHRB_TARGET;
-
- /* Probable Missed entry: Not applicable for POWER8 */
- if ((addr == 0) && (target == 0) && (pred == 1)) {
- r_index++;
- continue;
- }
-
- /* Real Missed entry: Power8 based missed entry */
- if ((addr == 0) && (target == 1) && (pred == 1)) {
- r_index++;
- continue;
- }
-
- /* Reserved condition: Not a valid entry */
- if ((addr == 0) && (target == 1) && (pred == 0)) {
- r_index++;
- continue;
- }
-
- /* Is a target address */
- if (val & BHRB_TARGET) {
- /* First address cannot be a target address */
- if (r_index == 0) {
- r_index++;
- continue;
- }
-
- /* Update target address for the previous entry */
- cpuhw->bhrb_entries[u_index - 1].to = addr;
- cpuhw->bhrb_entries[u_index - 1].mispred = pred;
- cpuhw->bhrb_entries[u_index - 1].predicted = ~pred;
-
- /* Dont increment u_index */
- r_index++;
- } else {
- /* Update address, flags for current entry */
- cpuhw->bhrb_entries[u_index].from = addr;
- cpuhw->bhrb_entries[u_index].mispred = pred;
- cpuhw->bhrb_entries[u_index].predicted = ~pred;
-
- /* Successfully populated one entry */
- u_index++;
- r_index++;
- }
- }
- }
- cpuhw->bhrb_stack.nr = u_index;
- return;
-}
/*
* A counter has overflowed; update its count and record
@@ -1587,13 +1397,6 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
if (event->attr.sample_type & PERF_SAMPLE_ADDR)
perf_get_data_addr(regs, &data.addr);
- if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
- struct cpu_hw_events *cpuhw;
- cpuhw = &__get_cpu_var(cpu_hw_events);
- power_pmu_bhrb_read(cpuhw);
- data.br_stack = &cpuhw->bhrb_stack;
- }
-
if (perf_event_overflow(event, &data, regs))
power_pmu_stop(event, 0);
}
@@ -1619,7 +1422,7 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
*/
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
- bool use_siar = regs_use_siar(regs);
+ unsigned long use_siar = regs->result;
if (use_siar && siar_valid(regs))
return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
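mmcra_sipr() and mmcra_sihv() above answer the same question from a different MMCRA bit depending on whether the PMU sets PPMU_ALT_SIPR. The sketch below shows only that selection pattern; the flag value and bit positions are placeholders, not the real SPR layout.

#include <stdbool.h>
#include <stdint.h>

#define PPMU_ALT_SIPR           0x1             /* illustrative flag value */
#define MMCRA_SIPR              (1ULL << 40)    /* placeholder bit positions, */
#define POWER6_MMCRA_SIPR       (1ULL << 35)    /* not the real register layout */

static bool mmcra_sipr(uint64_t mmcra, unsigned long pmu_flags)
{
        uint64_t sipr = MMCRA_SIPR;

        if (pmu_flags & PPMU_ALT_SIPR)          /* POWER6-style PMUs move the bit */
                sipr = POWER6_MMCRA_SIPR;

        return !!(mmcra & sipr);
}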
diff --git a/trunk/arch/powerpc/perf/power5+-pmu.c b/trunk/arch/powerpc/perf/power5+-pmu.c
index b03b6dc0172d..a8757baa28f3 100644
--- a/trunk/arch/powerpc/perf/power5+-pmu.c
+++ b/trunk/arch/powerpc/perf/power5+-pmu.c
@@ -671,7 +671,7 @@ static struct power_pmu power5p_pmu = {
.get_alternatives = power5p_get_alternatives,
.disable_pmc = power5p_disable_pmc,
.limited_pmc_event = power5p_limited_pmc_event,
- .flags = PPMU_LIMITED_PMC5_6 | PPMU_HAS_SSLOT,
+ .flags = PPMU_LIMITED_PMC5_6,
.n_generic = ARRAY_SIZE(power5p_generic_events),
.generic_events = power5p_generic_events,
.cache_events = &power5p_cache_events,
diff --git a/trunk/arch/powerpc/perf/power5-pmu.c b/trunk/arch/powerpc/perf/power5-pmu.c
index 1e8ce423c3af..e7f06eb7a861 100644
--- a/trunk/arch/powerpc/perf/power5-pmu.c
+++ b/trunk/arch/powerpc/perf/power5-pmu.c
@@ -615,7 +615,6 @@ static struct power_pmu power5_pmu = {
.n_generic = ARRAY_SIZE(power5_generic_events),
.generic_events = power5_generic_events,
.cache_events = &power5_cache_events,
- .flags = PPMU_HAS_SSLOT,
};
static int __init init_power5_pmu(void)
diff --git a/trunk/arch/powerpc/perf/power8-pmu.c b/trunk/arch/powerpc/perf/power8-pmu.c
deleted file mode 100644
index f7d1c4fff303..000000000000
--- a/trunk/arch/powerpc/perf/power8-pmu.c
+++ /dev/null
@@ -1,592 +0,0 @@
-/*
- * Performance counter support for POWER8 processors.
- *
- * Copyright 2009 Paul Mackerras, IBM Corporation.
- * Copyright 2013 Michael Ellerman, IBM Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include
-#include
-#include
-
-
-/*
- * Some power8 event codes.
- */
-#define PM_CYC 0x0001e
-#define PM_GCT_NOSLOT_CYC 0x100f8
-#define PM_CMPLU_STALL 0x4000a
-#define PM_INST_CMPL 0x00002
-#define PM_BRU_FIN 0x10068
-#define PM_BR_MPRED_CMPL 0x400f6
-
-
-/*
- * Raw event encoding for POWER8:
- *
- * 60 56 52 48 44 40 36 32
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ thresh_cmp ] [ thresh_ctl ]
- * |
- * thresh start/stop OR FAB match -*
- *
- * 28 24 20 16 12 8 4 0
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ ] [ sample ] [cache] [ pmc ] [unit ] c m [ pmcxsel ]
- * | | | | |
- * | | | | *- mark
- * | | *- L1/L2/L3 cache_sel |
- * | | |
- * | *- sampling mode for marked events *- combine
- * |
- * *- thresh_sel
- *
- * Below uses IBM bit numbering.
- *
- * MMCR1[x:y] = unit (PMCxUNIT)
- * MMCR1[x] = combine (PMCxCOMB)
- *
- * if pmc == 3 and unit == 0 and pmcxsel[0:6] == 0b0101011
- * # PM_MRK_FAB_RSP_MATCH
- * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
- * else if pmc == 4 and unit == 0xf and pmcxsel[0:6] == 0b0101001
- * # PM_MRK_FAB_RSP_MATCH_CYC
- * MMCR1[20:27] = thresh_ctl (FAB_CRESP_MATCH / FAB_TYPE_MATCH)
- * else
- * MMCRA[48:55] = thresh_ctl (THRESH START/END)
- *
- * if thresh_sel:
- * MMCRA[45:47] = thresh_sel
- *
- * if thresh_cmp:
- * MMCRA[22:24] = thresh_cmp[0:2]
- * MMCRA[25:31] = thresh_cmp[3:9]
- *
- * if unit == 6 or unit == 7
- * MMCRC[53:55] = cache_sel[1:3] (L2EVENT_SEL)
- * else if unit == 8 or unit == 9:
- * if cache_sel[0] == 0: # L3 bank
- * MMCRC[47:49] = cache_sel[1:3] (L3EVENT_SEL0)
- * else if cache_sel[0] == 1:
- * MMCRC[50:51] = cache_sel[2:3] (L3EVENT_SEL1)
- * else if cache_sel[1]: # L1 event
- * MMCR1[16] = cache_sel[2]
- * MMCR1[17] = cache_sel[3]
- *
- * if mark:
- * MMCRA[63] = 1 (SAMPLE_ENABLE)
- * MMCRA[57:59] = sample[0:2] (RAND_SAMP_ELIG)
- * MMCRA[61:62] = sample[3:4] (RAND_SAMP_MODE)
- *
- */
-
-#define EVENT_THR_CMP_SHIFT 40 /* Threshold CMP value */
-#define EVENT_THR_CMP_MASK 0x3ff
-#define EVENT_THR_CTL_SHIFT 32 /* Threshold control value (start/stop) */
-#define EVENT_THR_CTL_MASK 0xffull
-#define EVENT_THR_SEL_SHIFT 29 /* Threshold select value */
-#define EVENT_THR_SEL_MASK 0x7
-#define EVENT_THRESH_SHIFT 29 /* All threshold bits */
-#define EVENT_THRESH_MASK 0x1fffffull
-#define EVENT_SAMPLE_SHIFT 24 /* Sampling mode & eligibility */
-#define EVENT_SAMPLE_MASK 0x1f
-#define EVENT_CACHE_SEL_SHIFT 20 /* L2/L3 cache select */
-#define EVENT_CACHE_SEL_MASK 0xf
-#define EVENT_IS_L1 (4 << EVENT_CACHE_SEL_SHIFT)
-#define EVENT_PMC_SHIFT 16 /* PMC number (1-based) */
-#define EVENT_PMC_MASK 0xf
-#define EVENT_UNIT_SHIFT 12 /* Unit */
-#define EVENT_UNIT_MASK 0xf
-#define EVENT_COMBINE_SHIFT 11 /* Combine bit */
-#define EVENT_COMBINE_MASK 0x1
-#define EVENT_MARKED_SHIFT 8 /* Marked bit */
-#define EVENT_MARKED_MASK 0x1
-#define EVENT_IS_MARKED (EVENT_MARKED_MASK << EVENT_MARKED_SHIFT)
-#define EVENT_PSEL_MASK 0xff /* PMCxSEL value */
-
-/* MMCRA IFM bits - POWER8 */
-#define POWER8_MMCRA_IFM1 0x0000000040000000UL
-#define POWER8_MMCRA_IFM2 0x0000000080000000UL
-#define POWER8_MMCRA_IFM3 0x00000000C0000000UL
-
-#define ONLY_PLM \
- (PERF_SAMPLE_BRANCH_USER |\
- PERF_SAMPLE_BRANCH_KERNEL |\
- PERF_SAMPLE_BRANCH_HV)
-
-/*
- * Layout of constraint bits:
- *
- * 60 56 52 48 44 40 36 32
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ fab_match ] [ thresh_cmp ] [ thresh_ctl ] [ ]
- * |
- * thresh_sel -*
- *
- * 28 24 20 16 12 8 4 0
- * | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - | - - - - |
- * [ ] [ sample ] [ ] [6] [5] [4] [3] [2] [1]
- * | |
- * L1 I/D qualifier -* | Count of events for each PMC.
- * | p1, p2, p3, p4, p5, p6.
- * nc - number of counters -*
- *
- * The PMC fields P1..P6, and NC, are adder fields. As we accumulate constraints
- * we want the low bit of each field to be added to any existing value.
- *
- * Everything else is a value field.
- */
-
-#define CNST_FAB_MATCH_VAL(v) (((v) & EVENT_THR_CTL_MASK) << 56)
-#define CNST_FAB_MATCH_MASK CNST_FAB_MATCH_VAL(EVENT_THR_CTL_MASK)
-
-/* We just throw all the threshold bits into the constraint */
-#define CNST_THRESH_VAL(v) (((v) & EVENT_THRESH_MASK) << 32)
-#define CNST_THRESH_MASK CNST_THRESH_VAL(EVENT_THRESH_MASK)
-
-#define CNST_L1_QUAL_VAL(v) (((v) & 3) << 22)
-#define CNST_L1_QUAL_MASK CNST_L1_QUAL_VAL(3)
-
-#define CNST_SAMPLE_VAL(v) (((v) & EVENT_SAMPLE_MASK) << 16)
-#define CNST_SAMPLE_MASK CNST_SAMPLE_VAL(EVENT_SAMPLE_MASK)
-
-/*
- * For NC we are counting up to 4 events. This requires three bits, and we need
- * the fifth event to overflow and set the 4th bit. To achieve that we bias the
- * fields by 3 in test_adder.
- */
-#define CNST_NC_SHIFT 12
-#define CNST_NC_VAL (1 << CNST_NC_SHIFT)
-#define CNST_NC_MASK (8 << CNST_NC_SHIFT)
-#define POWER8_TEST_ADDER (3 << CNST_NC_SHIFT)
-
-/*
- * For the per-PMC fields we have two bits. The low bit is added, so if two
- * events ask for the same PMC the sum will overflow, setting the high bit,
- * indicating an error. So our mask sets the high bit.
- */
-#define CNST_PMC_SHIFT(pmc) ((pmc - 1) * 2)
-#define CNST_PMC_VAL(pmc) (1 << CNST_PMC_SHIFT(pmc))
-#define CNST_PMC_MASK(pmc) (2 << CNST_PMC_SHIFT(pmc))
-
-/* Our add_fields is defined as: */
-#define POWER8_ADD_FIELDS \
- CNST_PMC_VAL(1) | CNST_PMC_VAL(2) | CNST_PMC_VAL(3) | \
- CNST_PMC_VAL(4) | CNST_PMC_VAL(5) | CNST_PMC_VAL(6) | CNST_NC_VAL
-
-
-/* Bits in MMCR1 for POWER8 */
-#define MMCR1_UNIT_SHIFT(pmc) (60 - (4 * ((pmc) - 1)))
-#define MMCR1_COMBINE_SHIFT(pmc) (35 - ((pmc) - 1))
-#define MMCR1_PMCSEL_SHIFT(pmc) (24 - (((pmc) - 1)) * 8)
-#define MMCR1_DC_QUAL_SHIFT 47
-#define MMCR1_IC_QUAL_SHIFT 46
-
-/* Bits in MMCRA for POWER8 */
-#define MMCRA_SAMP_MODE_SHIFT 1
-#define MMCRA_SAMP_ELIG_SHIFT 4
-#define MMCRA_THR_CTL_SHIFT 8
-#define MMCRA_THR_SEL_SHIFT 16
-#define MMCRA_THR_CMP_SHIFT 32
-#define MMCRA_SDAR_MODE_TLB (1ull << 42)
-
-
-static inline bool event_is_fab_match(u64 event)
-{
- /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
- event &= 0xff0fe;
-
- /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
- return (event == 0x30056 || event == 0x4f052);
-}
-
-static int power8_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
-{
- unsigned int unit, pmc, cache;
- unsigned long mask, value;
-
- mask = value = 0;
-
- pmc = (event >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
- cache = (event >> EVENT_CACHE_SEL_SHIFT) & EVENT_CACHE_SEL_MASK;
-
- if (pmc) {
- if (pmc > 6)
- return -1;
-
- mask |= CNST_PMC_MASK(pmc);
- value |= CNST_PMC_VAL(pmc);
-
- if (pmc >= 5 && event != 0x500fa && event != 0x600f4)
- return -1;
- }
-
- if (pmc <= 4) {
- /*
- * Add to number of counters in use. Note this includes events with
- * a PMC of 0 - they still need a PMC, it's just assigned later.
- * Don't count events on PMC 5 & 6, there is only one valid event
- * on each of those counters, and they are handled above.
- */
- mask |= CNST_NC_MASK;
- value |= CNST_NC_VAL;
- }
-
- if (unit >= 6 && unit <= 9) {
- /*
- * L2/L3 events contain a cache selector field, which is
- * supposed to be programmed into MMCRC. However MMCRC is only
- * HV writable, and there is no API for guest kernels to modify
- * it. The solution is for the hypervisor to initialise the
- * field to zeroes, and for us to only ever allow events that
- * have a cache selector of zero.
- */
- if (cache)
- return -1;
-
- } else if (event & EVENT_IS_L1) {
- mask |= CNST_L1_QUAL_MASK;
- value |= CNST_L1_QUAL_VAL(cache);
- }
-
- if (event & EVENT_IS_MARKED) {
- mask |= CNST_SAMPLE_MASK;
- value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
- }
-
- /*
- * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
- * the threshold control bits are used for the match value.
- */
- if (event_is_fab_match(event)) {
- mask |= CNST_FAB_MATCH_MASK;
- value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
- } else {
- /*
- * Check the mantissa upper two bits are not zero, unless the
- * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
- */
- unsigned int cmp, exp;
-
- cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
- exp = cmp >> 7;
-
- if (exp && (cmp & 0x60) == 0)
- return -1;
-
- mask |= CNST_THRESH_MASK;
- value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
- }
-
- *maskp = mask;
- *valp = value;
-
- return 0;
-}
-
-static int power8_compute_mmcr(u64 event[], int n_ev,
- unsigned int hwc[], unsigned long mmcr[])
-{
- unsigned long mmcra, mmcr1, unit, combine, psel, cache, val;
- unsigned int pmc, pmc_inuse;
- int i;
-
- pmc_inuse = 0;
-
- /* First pass to count resource use */
- for (i = 0; i < n_ev; ++i) {
- pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- if (pmc)
- pmc_inuse |= 1 << pmc;
- }
-
- /* In continous sampling mode, update SDAR on TLB miss */
- mmcra = MMCRA_SDAR_MODE_TLB;
- mmcr1 = 0;
-
- /* Second pass: assign PMCs, set all MMCR1 fields */
- for (i = 0; i < n_ev; ++i) {
- pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
- unit = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
- combine = (event[i] >> EVENT_COMBINE_SHIFT) & EVENT_COMBINE_MASK;
- psel = event[i] & EVENT_PSEL_MASK;
-
- if (!pmc) {
- for (pmc = 1; pmc <= 4; ++pmc) {
- if (!(pmc_inuse & (1 << pmc)))
- break;
- }
-
- pmc_inuse |= 1 << pmc;
- }
-
- if (pmc <= 4) {
- mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
- mmcr1 |= combine << MMCR1_COMBINE_SHIFT(pmc);
- mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
- }
-
- if (event[i] & EVENT_IS_L1) {
- cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
- mmcr1 |= (cache & 1) << MMCR1_IC_QUAL_SHIFT;
- cache >>= 1;
- mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
- }
-
- if (event[i] & EVENT_IS_MARKED) {
- mmcra |= MMCRA_SAMPLE_ENABLE;
-
- val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
- if (val) {
- mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
- mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
- }
- }
-
- /*
- * PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
- * the threshold bits are used for the match value.
- */
- if (event_is_fab_match(event[i])) {
- mmcr1 |= (event[i] >> EVENT_THR_CTL_SHIFT) &
- EVENT_THR_CTL_MASK;
- } else {
- val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
- mmcra |= val << MMCRA_THR_CTL_SHIFT;
- val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
- mmcra |= val << MMCRA_THR_SEL_SHIFT;
- val = (event[i] >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
- mmcra |= val << MMCRA_THR_CMP_SHIFT;
- }
-
- hwc[i] = pmc - 1;
- }
-
- /* Return MMCRx values */
- mmcr[0] = 0;
-
- /* pmc_inuse is 1-based */
- if (pmc_inuse & 2)
- mmcr[0] = MMCR0_PMC1CE;
-
- if (pmc_inuse & 0x7c)
- mmcr[0] |= MMCR0_PMCjCE;
-
- mmcr[1] = mmcr1;
- mmcr[2] = mmcra;
-
- return 0;
-}
-
-#define MAX_ALT 2
-
-/* Table of alternatives, sorted by column 0 */
-static const unsigned int event_alternatives[][MAX_ALT] = {
- { 0x10134, 0x301e2 }, /* PM_MRK_ST_CMPL */
- { 0x10138, 0x40138 }, /* PM_BR_MRK_2PATH */
- { 0x18082, 0x3e05e }, /* PM_L3_CO_MEPF */
- { 0x1d14e, 0x401e8 }, /* PM_MRK_DATA_FROM_L2MISS */
- { 0x1e054, 0x4000a }, /* PM_CMPLU_STALL */
- { 0x20036, 0x40036 }, /* PM_BR_2PATH */
- { 0x200f2, 0x300f2 }, /* PM_INST_DISP */
- { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */
- { 0x2013c, 0x3012e }, /* PM_MRK_FILT_MATCH */
- { 0x3e054, 0x400f0 }, /* PM_LD_MISS_L1 */
- { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */
-};
-
-/*
- * Scan the alternatives table for a match and return the
- * index into the alternatives table if found, else -1.
- */
-static int find_alternative(u64 event)
-{
- int i, j;
-
- for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
- if (event < event_alternatives[i][0])
- break;
-
- for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
- if (event == event_alternatives[i][j])
- return i;
- }
-
- return -1;
-}
-
-static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
-{
- int i, j, num_alt = 0;
- u64 alt_event;
-
- alt[num_alt++] = event;
-
- i = find_alternative(event);
- if (i >= 0) {
- /* Filter out the original event, it's already in alt[0] */
- for (j = 0; j < MAX_ALT; ++j) {
- alt_event = event_alternatives[i][j];
- if (alt_event && alt_event != event)
- alt[num_alt++] = alt_event;
- }
- }
-
- if (flags & PPMU_ONLY_COUNT_RUN) {
- /*
- * We're only counting in RUN state, so PM_CYC is equivalent to
- * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
- */
- j = num_alt;
- for (i = 0; i < num_alt; ++i) {
- switch (alt[i]) {
- case 0x1e: /* PM_CYC */
- alt[j++] = 0x600f4; /* PM_RUN_CYC */
- break;
- case 0x600f4: /* PM_RUN_CYC */
- alt[j++] = 0x1e;
- break;
- case 0x2: /* PM_PPC_CMPL */
- alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
- break;
- case 0x500fa: /* PM_RUN_INST_CMPL */
- alt[j++] = 0x2; /* PM_PPC_CMPL */
- break;
- }
- }
- num_alt = j;
- }
-
- return num_alt;
-}
-
-static void power8_disable_pmc(unsigned int pmc, unsigned long mmcr[])
-{
- if (pmc <= 3)
- mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
-}
-
-PMU_FORMAT_ATTR(event, "config:0-49");
-PMU_FORMAT_ATTR(pmcxsel, "config:0-7");
-PMU_FORMAT_ATTR(mark, "config:8");
-PMU_FORMAT_ATTR(combine, "config:11");
-PMU_FORMAT_ATTR(unit, "config:12-15");
-PMU_FORMAT_ATTR(pmc, "config:16-19");
-PMU_FORMAT_ATTR(cache_sel, "config:20-23");
-PMU_FORMAT_ATTR(sample_mode, "config:24-28");
-PMU_FORMAT_ATTR(thresh_sel, "config:29-31");
-PMU_FORMAT_ATTR(thresh_stop, "config:32-35");
-PMU_FORMAT_ATTR(thresh_start, "config:36-39");
-PMU_FORMAT_ATTR(thresh_cmp, "config:40-49");
-
-static struct attribute *power8_pmu_format_attr[] = {
- &format_attr_event.attr,
- &format_attr_pmcxsel.attr,
- &format_attr_mark.attr,
- &format_attr_combine.attr,
- &format_attr_unit.attr,
- &format_attr_pmc.attr,
- &format_attr_cache_sel.attr,
- &format_attr_sample_mode.attr,
- &format_attr_thresh_sel.attr,
- &format_attr_thresh_stop.attr,
- &format_attr_thresh_start.attr,
- &format_attr_thresh_cmp.attr,
- NULL,
-};
-
-struct attribute_group power8_pmu_format_group = {
- .name = "format",
- .attrs = power8_pmu_format_attr,
-};
-
-static const struct attribute_group *power8_pmu_attr_groups[] = {
- &power8_pmu_format_group,
- NULL,
-};
-
-static int power8_generic_events[] = {
- [PERF_COUNT_HW_CPU_CYCLES] = PM_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = PM_GCT_NOSLOT_CYC,
- [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = PM_CMPLU_STALL,
- [PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
- [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
- [PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
-};
-
-static u64 power8_bhrb_filter_map(u64 branch_sample_type)
-{
- u64 pmu_bhrb_filter = 0;
- u64 br_privilege = branch_sample_type & ONLY_PLM;
-
- /* BHRB and regular PMU events share the same privilege state
- * filter configuration. BHRB is always recorded along with a
- * regular PMU event. So the privilege state filter criteria for BHRB
- * and the companion PMU events have to be the same. By default the
- * "perf record" tool sets all privilege bits ON when no filter
- * criteria is provided on the command line. So as long as all
- * privilege bits are ON or they are OFF, we are good to go.
- */
- if ((br_privilege != 7) && (br_privilege != 0))
- return -1;
-
- /* No branch filter requested */
- if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
- return pmu_bhrb_filter;
-
- /* Invalid branch filter options - HW does not support */
- if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
- return -1;
-
- if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
- return -1;
-
- if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
- pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
- return pmu_bhrb_filter;
- }
-
- /* Everything else is unsupported */
- return -1;
-}
-
-static void power8_config_bhrb(u64 pmu_bhrb_filter)
-{
- /* Enable BHRB filter in PMU */
- mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
-}
-
-static struct power_pmu power8_pmu = {
- .name = "POWER8",
- .n_counter = 6,
- .max_alternatives = MAX_ALT + 1,
- .add_fields = POWER8_ADD_FIELDS,
- .test_adder = POWER8_TEST_ADDER,
- .compute_mmcr = power8_compute_mmcr,
- .config_bhrb = power8_config_bhrb,
- .bhrb_filter_map = power8_bhrb_filter_map,
- .get_constraint = power8_get_constraint,
- .get_alternatives = power8_get_alternatives,
- .disable_pmc = power8_disable_pmc,
- .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB,
- .n_generic = ARRAY_SIZE(power8_generic_events),
- .generic_events = power8_generic_events,
- .attr_groups = power8_pmu_attr_groups,
- .bhrb_nr = 32,
-};
-
-static int __init init_power8_pmu(void)
-{
- if (!cur_cpu_spec->oprofile_cpu_type ||
- strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
- return -ENODEV;
-
- return register_power_pmu(&power8_pmu);
-}
-early_initcall(init_power8_pmu);
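
The constraint encoding deleted above detects conflicting event requests with a single add-and-compare: each PMC gets a two-bit field whose low bit is set per request, so a second request for the same PMC carries into the masked high bit, and the NC (number-of-counters) field is biased by 3 so a fifth event overflows its fourth bit. Below is a stand-alone sketch of the per-PMC half of that trick, using illustrative names rather than the kernel's CNST_* macros:

    #include <stdio.h>

    /* Illustrative constants mirroring the deleted CNST_PMC_* encoding. */
    #define PMC_SHIFT(pmc)  (((pmc) - 1) * 2)
    #define PMC_VAL(pmc)    (1u << PMC_SHIFT(pmc))   /* low bit: "this PMC requested" */
    #define PMC_MASK(pmc)   (2u << PMC_SHIFT(pmc))   /* high bit: carry here means conflict */

    int main(void)
    {
        /* Two events asking for the same PMC: adding their value fields makes
         * the low bit carry into the masked high bit, flagging the conflict. */
        unsigned int a = PMC_VAL(3), b = PMC_VAL(3);
        unsigned int sum = a + b;

        if (sum & PMC_MASK(3))
            printf("conflict: both events want PMC3\n");
        else
            printf("no conflict\n");
        return 0;
    }

The same carry idea extends to the NC field: biasing the adder by 3 simply moves the overflow point from eight events down to five.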
diff --git a/trunk/arch/powerpc/platforms/40x/Kconfig b/trunk/arch/powerpc/platforms/40x/Kconfig
index bd40bbb15e14..a392d12dd21f 100644
--- a/trunk/arch/powerpc/platforms/40x/Kconfig
+++ b/trunk/arch/powerpc/platforms/40x/Kconfig
@@ -20,6 +20,7 @@ config HOTFOOT
bool "Hotfoot"
depends on 40x
default n
+ select 405EP
select PPC40x_SIMPLE
select PCI
help
@@ -104,6 +105,9 @@ config 405GP
select IBM405_ERR51
select IBM_EMAC_ZMII
+config 405EP
+ bool
+
config 405EX
bool
select IBM_EMAC_EMAC4
@@ -115,6 +119,9 @@ config 405EZ
select IBM_EMAC_MAL_CLR_ICINTSTAT
select IBM_EMAC_MAL_COMMON_ERR
+config 405GPR
+ bool
+
config XILINX_VIRTEX
bool
select DEFAULT_UIMAGE
diff --git a/trunk/arch/powerpc/platforms/512x/mpc512x_shared.c b/trunk/arch/powerpc/platforms/512x/mpc512x_shared.c
index d30235b7e3f7..2765cd1c7678 100644
--- a/trunk/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/trunk/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -330,22 +330,19 @@ void __init mpc512x_init_IRQ(void)
static struct of_device_id __initdata of_bus_ids[] = {
{ .compatible = "fsl,mpc5121-immr", },
{ .compatible = "fsl,mpc5121-localbus", },
+ { .compatible = "fsl,mpc5121-mbx", },
+ { .compatible = "fsl,mpc5121-nfc", },
+ { .compatible = "fsl,mpc5121-sram", },
+ { .compatible = "fsl,mpc5121-pci", },
+ { .compatible = "gpio-leds", },
{},
};
void __init mpc512x_declare_of_platform_devices(void)
{
- struct device_node *np;
-
if (of_platform_bus_probe(NULL, of_bus_ids, NULL))
printk(KERN_ERR __FILE__ ": "
"Error while probing of_platform bus\n");
-
- np = of_find_compatible_node(NULL, NULL, "fsl,mpc5121-nfc");
- if (np) {
- of_platform_device_create(np, NULL, NULL);
- of_node_put(np);
- }
}
#define DEFAULT_FIFO_SIZE 16
diff --git a/trunk/arch/powerpc/platforms/Kconfig b/trunk/arch/powerpc/platforms/Kconfig
index 9089ae71334a..52de8bccfb30 100644
--- a/trunk/arch/powerpc/platforms/Kconfig
+++ b/trunk/arch/powerpc/platforms/Kconfig
@@ -6,6 +6,7 @@ source "arch/powerpc/platforms/chrp/Kconfig"
source "arch/powerpc/platforms/512x/Kconfig"
source "arch/powerpc/platforms/52xx/Kconfig"
source "arch/powerpc/platforms/powermac/Kconfig"
+source "arch/powerpc/platforms/prep/Kconfig"
source "arch/powerpc/platforms/maple/Kconfig"
source "arch/powerpc/platforms/pasemi/Kconfig"
source "arch/powerpc/platforms/ps3/Kconfig"
@@ -232,7 +233,7 @@ endmenu
config PPC601_SYNC_FIX
bool "Workarounds for PPC601 bugs"
- depends on 6xx && PPC_PMAC
+ depends on 6xx && (PPC_PREP || PPC_PMAC)
help
Some versions of the PPC601 (the first PowerPC chip) have bugs which
mean that extra synchronization instructions are required near
diff --git a/trunk/arch/powerpc/platforms/cell/pmu.c b/trunk/arch/powerpc/platforms/cell/pmu.c
index 348a27b12512..59c1a1694104 100644
--- a/trunk/arch/powerpc/platforms/cell/pmu.c
+++ b/trunk/arch/powerpc/platforms/cell/pmu.c
@@ -382,7 +382,7 @@ static int __init cbe_init_pm_irq(void)
unsigned int irq;
int rc, node;
- for_each_online_node(node) {
+ for_each_node(node) {
irq = irq_create_mapping(NULL, IIC_IRQ_IOEX_PMI |
(node << IIC_IRQ_NODE_SHIFT));
if (irq == NO_IRQ) {
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/file.c b/trunk/arch/powerpc/platforms/cell/spufs/file.c
index 0026a37e21fd..68c57d38745a 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/file.c
+++ b/trunk/arch/powerpc/platforms/cell/spufs/file.c
@@ -352,7 +352,7 @@ static unsigned long spufs_get_unmapped_area(struct file *file,
/* Else, try to obtain a 64K pages slice */
return slice_get_unmapped_area(addr, len, flags,
- MMU_PAGE_64K, 1);
+ MMU_PAGE_64K, 1, 0);
}
#endif /* CONFIG_SPU_FS_64K_LS */
diff --git a/trunk/arch/powerpc/platforms/cell/spufs/inode.c b/trunk/arch/powerpc/platforms/cell/spufs/inode.c
index 35f77a42bedf..3f3bb4cdbbec 100644
--- a/trunk/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/trunk/arch/powerpc/platforms/cell/spufs/inode.c
@@ -99,7 +99,6 @@ spufs_new_inode(struct super_block *sb, umode_t mode)
if (!inode)
goto out;
- inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
diff --git a/trunk/arch/powerpc/platforms/embedded6xx/Kconfig b/trunk/arch/powerpc/platforms/embedded6xx/Kconfig
index 302ba43d73a1..5a8f50a9afa7 100644
--- a/trunk/arch/powerpc/platforms/embedded6xx/Kconfig
+++ b/trunk/arch/powerpc/platforms/embedded6xx/Kconfig
@@ -9,6 +9,7 @@ config LINKSTATION
select FSL_SOC
select PPC_UDBG_16550 if SERIAL_8250
select DEFAULT_UIMAGE
+ select MPC10X_OPENPIC
select MPC10X_BRIDGE
help
Select LINKSTATION if configuring for one of PPC- (MPC8241)
@@ -23,6 +24,7 @@ config STORCENTER
select MPIC
select FSL_SOC
select PPC_UDBG_16550 if SERIAL_8250
+ select MPC10X_OPENPIC
select MPC10X_BRIDGE
help
Select STORCENTER if configuring for the iomega StorCenter
@@ -82,6 +84,9 @@ config MV64X60
select PPC_INDIRECT_PCI
select CHECK_CACHE_COHERENCY
+config MPC10X_OPENPIC
+ bool
+
config GAMECUBE_COMMON
bool
diff --git a/trunk/arch/powerpc/platforms/powernv/Kconfig b/trunk/arch/powerpc/platforms/powernv/Kconfig
index d3e840d643af..74fea5c21839 100644
--- a/trunk/arch/powerpc/platforms/powernv/Kconfig
+++ b/trunk/arch/powerpc/platforms/powernv/Kconfig
@@ -8,11 +8,6 @@ config PPC_POWERNV
select PPC_PCI_CHOICE if EMBEDDED
default y
-config POWERNV_MSI
- bool "Support PCI MSI on PowerNV platform"
- depends on PCI_MSI
- default y
-
config PPC_POWERNV_RTAS
depends on PPC_POWERNV
bool "Support for RTAS based PowerNV platforms such as BML"
diff --git a/trunk/arch/powerpc/platforms/powernv/opal-wrappers.S b/trunk/arch/powerpc/platforms/powernv/opal-wrappers.S
index 6fabe92eafb6..3bb07e5e43cd 100644
--- a/trunk/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/trunk/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -107,4 +107,3 @@ OPAL_CALL(opal_pci_mask_pe_error, OPAL_PCI_MASK_PE_ERROR);
OPAL_CALL(opal_set_slot_led_status, OPAL_SET_SLOT_LED_STATUS);
OPAL_CALL(opal_get_epow_status, OPAL_GET_EPOW_STATUS);
OPAL_CALL(opal_set_system_attention_led, OPAL_SET_SYSTEM_ATTENTION_LED);
-OPAL_CALL(opal_pci_msi_eoi, OPAL_PCI_MSI_EOI);
diff --git a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c
index 8c6c9cf91c13..8e90e8906df3 100644
--- a/trunk/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/trunk/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -26,12 +26,10 @@
#include
#include
#include
-#include
#include
#include
#include
#include
-#include
#include "powernv.h"
#include "pci.h"
@@ -89,7 +87,6 @@ static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
return IODA_INVALID_PE;
} while(test_and_set_bit(pe, phb->ioda.pe_alloc));
- phb->ioda.pe_array[pe].phb = phb;
phb->ioda.pe_array[pe].pe_number = pe;
return pe;
}
@@ -434,102 +431,22 @@ static void pnv_pci_ioda_setup_PEs(void)
}
}
-static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
+static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *dev)
{
- struct pci_dn *pdn = pnv_ioda_get_pdn(pdev);
- struct pnv_ioda_pe *pe;
-
- /*
- * The function can be called before the PE#
- * has been assigned. Do nothing in that case.
- */
- if (!pdn || pdn->pe_number == IODA_INVALID_PE)
- return;
-
- pe = &phb->ioda.pe_array[pdn->pe_number];
- set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ /* We delay DMA setup after we have assigned all PE# */
}
-static void pnv_pci_ioda1_tce_invalidate(struct iommu_table *tbl,
- u64 *startp, u64 *endp)
+static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
- u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
- unsigned long start, end, inc;
-
- start = __pa(startp);
- end = __pa(endp);
-
- /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
- if (tbl->it_busno) {
- start <<= 12;
- end <<= 12;
- inc = 128 << 12;
- start |= tbl->it_busno;
- end |= tbl->it_busno;
- } else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
- /* p7ioc-style invalidation, 2 TCEs per write */
- start |= (1ull << 63);
- end |= (1ull << 63);
- inc = 16;
- } else {
- /* Default (older HW) */
- inc = 128;
- }
-
- end |= inc - 1; /* round up end to be different than start */
-
- mb(); /* Ensure above stores are visible */
- while (start <= end) {
- __raw_writeq(start, invalidate);
- start += inc;
- }
-
- /*
- * The iommu layer will do another mb() for us on build()
- * and we don't care on free()
- */
-}
+ struct pci_dev *dev;
-static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
- struct iommu_table *tbl,
- u64 *startp, u64 *endp)
-{
- unsigned long start, end, inc;
- u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
-
- /* We'll invalidate DMA address in PE scope */
- start = 0x2ul << 60;
- start |= (pe->pe_number & 0xFF);
- end = start;
-
- /* Figure out the start, end and step */
- inc = tbl->it_offset + (((u64)startp - tbl->it_base) / sizeof(u64));
- start |= (inc << 12);
- inc = tbl->it_offset + (((u64)endp - tbl->it_base) / sizeof(u64));
- end |= (inc << 12);
- inc = (0x1ul << 12);
- mb();
-
- while (start <= end) {
- __raw_writeq(start, invalidate);
- start += inc;
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ if (dev->subordinate)
+ pnv_ioda_setup_bus_dma(pe, dev->subordinate);
}
}
-void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
- u64 *startp, u64 *endp)
-{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32_table);
- struct pnv_phb *phb = pe->phb;
-
- if (phb->type == PNV_PHB_IODA1)
- pnv_pci_ioda1_tce_invalidate(tbl, startp, endp);
- else
- pnv_pci_ioda2_tce_invalidate(pe, tbl, startp, endp);
-}
-
static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe, unsigned int base,
unsigned int segs)
@@ -601,11 +518,16 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
*/
tbl->it_busno = 0;
tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE |
- TCE_PCI_SWINV_PAIR;
+ tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE
+ | TCE_PCI_SWINV_PAIR;
}
iommu_init_table(tbl, phb->hose->node);
+ if (pe->pdev)
+ set_iommu_table_base(&pe->pdev->dev, tbl);
+ else
+ pnv_ioda_setup_bus_dma(pe, pe->pbus);
+
return;
fail:
/* XXX Failure: Try to fallback to 64-bit only ? */
@@ -615,76 +537,6 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
-static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
- struct pnv_ioda_pe *pe)
-{
- struct page *tce_mem = NULL;
- void *addr;
- const __be64 *swinvp;
- struct iommu_table *tbl;
- unsigned int tce_table_size, end;
- int64_t rc;
-
- /* We shouldn't already have a 32-bit DMA associated */
- if (WARN_ON(pe->tce32_seg >= 0))
- return;
-
- /* The PE will reserve all possible 32-bits space */
- pe->tce32_seg = 0;
- end = (1 << ilog2(phb->ioda.m32_pci_base));
- tce_table_size = (end / 0x1000) * 8;
- pe_info(pe, "Setting up 32-bit TCE table at 0..%08x\n",
- end);
-
- /* Allocate TCE table */
- tce_mem = alloc_pages_node(phb->hose->node, GFP_KERNEL,
- get_order(tce_table_size));
- if (!tce_mem) {
- pe_err(pe, "Failed to allocate a 32-bit TCE memory\n");
- goto fail;
- }
- addr = page_address(tce_mem);
- memset(addr, 0, tce_table_size);
-
- /*
- * Map TCE table through TVT. The TVE index is the PE number
- * shifted by 1 bit for 32-bits DMA space.
- */
- rc = opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
- pe->pe_number << 1, 1, __pa(addr),
- tce_table_size, 0x1000);
- if (rc) {
- pe_err(pe, "Failed to configure 32-bit TCE table,"
- " err %ld\n", rc);
- goto fail;
- }
-
- /* Setup linux iommu table */
- tbl = &pe->tce32_table;
- pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0);
-
- /* OPAL variant of PHB3 invalidated TCEs */
- swinvp = of_get_property(phb->hose->dn, "ibm,opal-tce-kill", NULL);
- if (swinvp) {
- /* We need a couple more fields -- an address and a data
- * value to OR in. Since the bus is only printed out on table
- * free errors, and on the first pass the data will be a
- * relative bus number, print that out instead.
- */
- tbl->it_busno = 0;
- tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
- }
- iommu_init_table(tbl, phb->hose->node);
-
- return;
-fail:
- if (pe->tce32_seg >= 0)
- pe->tce32_seg = -1;
- if (tce_mem)
- __free_pages(tce_mem, get_order(tce_table_size));
-}
-
static void pnv_ioda_setup_dma(struct pnv_phb *phb)
{
struct pci_controller *hose = phb->hose;
@@ -727,49 +579,20 @@ static void pnv_ioda_setup_dma(struct pnv_phb *phb)
if (segs > remaining)
segs = remaining;
}
-
- /*
- * For an IODA2 compliant PHB3, we needn't care about the weight.
- * All of the available 32-bit DMA space will be assigned to
- * the specific PE.
- */
- if (phb->type == PNV_PHB_IODA1) {
- pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
- pe->dma_weight, segs);
- pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
- } else {
- pe_info(pe, "Assign DMA32 space\n");
- segs = 0;
- pnv_pci_ioda2_setup_dma_pe(phb, pe);
- }
-
+ pe_info(pe, "DMA weight %d, assigned %d DMA32 segments\n",
+ pe->dma_weight, segs);
+ pnv_pci_ioda_setup_dma_pe(phb, pe, base, segs);
remaining -= segs;
base += segs;
}
}
#ifdef CONFIG_PCI_MSI
-static void pnv_ioda2_msi_eoi(struct irq_data *d)
-{
- unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
- struct irq_chip *chip = irq_data_get_irq_chip(d);
- struct pnv_phb *phb = container_of(chip, struct pnv_phb,
- ioda.irq_chip);
- int64_t rc;
-
- rc = opal_pci_msi_eoi(phb->opal_id, hw_irq);
- WARN_ON_ONCE(rc);
-
- icp_native_eoi(d);
-}
-
static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int virq,
- unsigned int is_64, struct msi_msg *msg)
+ unsigned int hwirq, unsigned int is_64,
+ struct msi_msg *msg)
{
struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
- struct irq_data *idata;
- struct irq_chip *ichip;
unsigned int xive_num = hwirq - phb->msi_base;
uint64_t addr64;
uint32_t addr32, data;
@@ -814,23 +637,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
}
msg->data = data;
- /*
- * Change the IRQ chip for the MSI interrupts on PHB3.
- * The corresponding IRQ chip should be populated for
- * the first time.
- */
- if (phb->type == PNV_PHB_IODA2) {
- if (!phb->ioda.irq_chip_init) {
- idata = irq_get_irq_data(virq);
- ichip = irq_data_get_irq_chip(idata);
- phb->ioda.irq_chip_init = 1;
- phb->ioda.irq_chip = *ichip;
- phb->ioda.irq_chip.irq_eoi = pnv_ioda2_msi_eoi;
- }
-
- irq_set_chip(virq, &phb->ioda.irq_chip);
- }
-
pr_devel("%s: %s-bit MSI on hwirq %x (xive #%d),"
" address=%x_%08x data=%x PE# %d\n",
pci_name(dev), is_64 ? "64" : "32", hwirq, xive_num,
@@ -841,7 +647,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
{
- unsigned int count;
+ unsigned int bmap_size;
const __be32 *prop = of_get_property(phb->hose->dn,
"ibm,opal-msi-ranges", NULL);
if (!prop) {
@@ -852,17 +658,18 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
return;
phb->msi_base = be32_to_cpup(prop);
- count = be32_to_cpup(prop + 1);
- if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
+ phb->msi_count = be32_to_cpup(prop + 1);
+ bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
+ phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
+ if (!phb->msi_map) {
pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
phb->hose->global_number);
return;
}
-
phb->msi_setup = pnv_pci_ioda_msi_setup;
phb->msi32_support = 1;
pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
- count, phb->msi_base);
+ phb->msi_count, phb->msi_base);
}
#else
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
@@ -1045,19 +852,18 @@ static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
return phb->ioda.pe_rmap[(bus->number << 8) | devfn];
}
-void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
+void __init pnv_pci_init_ioda1_phb(struct device_node *np)
{
struct pci_controller *hose;
static int primary = 1;
struct pnv_phb *phb;
unsigned long size, m32map_off, iomap_off, pemap_off;
const u64 *prop64;
- const u32 *prop32;
u64 phb_id;
void *aux;
long rc;
- pr_info(" Initializing IODA%d OPAL PHB %s\n", ioda_type, np->full_name);
+ pr_info(" Initializing IODA OPAL PHB %s\n", np->full_name);
prop64 = of_get_property(np, "ibm,opal-phbid", NULL);
if (!prop64) {
@@ -1084,34 +890,37 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
hose->last_busno = 0xff;
hose->private_data = phb;
phb->opal_id = phb_id;
- phb->type = ioda_type;
+ phb->type = PNV_PHB_IODA1;
/* Detect specific models for error handling */
if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
phb->model = PNV_PHB_MODEL_P7IOC;
- else if (of_device_is_compatible(np, "ibm,p8-pciex"))
- phb->model = PNV_PHB_MODEL_PHB3;
else
phb->model = PNV_PHB_MODEL_UNKNOWN;
- /* Parse 32-bit and IO ranges (if any) */
+ /* We parse "ranges" now since we need to deduce the register base
+ * from the IO base
+ */
pci_process_bridge_OF_ranges(phb->hose, np, primary);
primary = 0;
- /* Get registers */
+ /* Magic formula from Milton */
phb->regs = of_iomap(np, 0);
if (phb->regs == NULL)
pr_err(" Failed to map registers !\n");
+
+ /* XXX This is hack-a-thon. This needs to be changed so that:
+ * - we obtain stuff like PE# etc... from device-tree
+ * - we properly re-allocate M32 ourselves
+ * (the OFW one isn't very good)
+ */
+
/* Initialize more IODA stuff */
- prop32 = of_get_property(np, "ibm,opal-num-pes", NULL);
- if (!prop32)
- phb->ioda.total_pe = 1;
- else
- phb->ioda.total_pe = *prop32;
+ phb->ioda.total_pe = 128;
phb->ioda.m32_size = resource_size(&hose->mem_resources[0]);
- /* FW Has already off top 64k of M32 space (MSI space) */
+ /* OFW Has already off top 64k of M32 space (MSI space) */
phb->ioda.m32_size += 0x10000;
phb->ioda.m32_segsize = phb->ioda.m32_size / phb->ioda.total_pe;
@@ -1121,10 +930,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
phb->ioda.io_segsize = phb->ioda.io_size / phb->ioda.total_pe;
phb->ioda.io_pci_base = 0; /* XXX calculate this ? */
- /* Allocate aux data & arrays
- *
- * XXX TODO: Don't allocate io segmap on PHB3
- */
+ /* Allocate aux data & arrays */
size = _ALIGN_UP(phb->ioda.total_pe / 8, sizeof(unsigned long));
m32map_off = size;
size += phb->ioda.total_pe * sizeof(phb->ioda.m32_segmap[0]);
@@ -1154,7 +960,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
hose->mem_resources[2].start = 0;
hose->mem_resources[2].end = 0;
-#if 0 /* We should really do that ... */
+#if 0
rc = opal_pci_set_phb_mem_window(opal->phb_id,
window_type,
window_num,
@@ -1168,6 +974,16 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
phb->ioda.m32_size, phb->ioda.m32_segsize,
phb->ioda.io_size, phb->ioda.io_segsize);
+ if (phb->regs) {
+ pr_devel(" BUID = 0x%016llx\n", in_be64(phb->regs + 0x100));
+ pr_devel(" PHB2_CR = 0x%016llx\n", in_be64(phb->regs + 0x160));
+ pr_devel(" IO_BAR = 0x%016llx\n", in_be64(phb->regs + 0x170));
+ pr_devel(" IO_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x178));
+ pr_devel(" IO_SAR = 0x%016llx\n", in_be64(phb->regs + 0x180));
+ pr_devel(" M32_BAR = 0x%016llx\n", in_be64(phb->regs + 0x190));
+ pr_devel(" M32_BAMR = 0x%016llx\n", in_be64(phb->regs + 0x198));
+ pr_devel(" M32_SAR = 0x%016llx\n", in_be64(phb->regs + 0x1a0));
+ }
phb->hose->ops = &pnv_pci_ops;
/* Setup RID -> PE mapping function */
@@ -1195,18 +1011,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np, int ioda_type)
rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
if (rc)
pr_warning(" OPAL Error %ld performing IODA table reset !\n", rc);
-
- /*
- * On IODA1 map everything to PE#0, on IODA2 we assume the IODA reset
- * has cleared the RTT which has the same effect
- */
- if (ioda_type == PNV_PHB_IODA1)
- opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE);
-}
-
-void pnv_pci_init_ioda2_phb(struct device_node *np)
-{
- pnv_pci_init_ioda_phb(np, PNV_PHB_IODA2);
+ opal_pci_set_pe(phb_id, 0, 0, 7, 1, 1 , OPAL_MAP_PE);
}
void __init pnv_pci_init_ioda_hub(struct device_node *np)
@@ -1229,6 +1034,6 @@ void __init pnv_pci_init_ioda_hub(struct device_node *np)
for_each_child_of_node(np, phbn) {
/* Look for IODA1 PHBs */
if (of_device_is_compatible(phbn, "ibm,ioda-phb"))
- pnv_pci_init_ioda_phb(phbn, PNV_PHB_IODA1);
+ pnv_pci_init_ioda1_phb(phbn);
}
}
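
Both the IODA1 invalidation path removed here and the generic pnv_tce_invalidate() re-added in pci.c further below share the same shape: encode the physical range of updated TCEs into start/end tokens, round the end up, and write one token per stride to the PHB's invalidate register. Here is a rough user-space sketch of just that stepping logic, assuming the p7ioc-style encoding (bit 63 set, 16-byte stride) and with the MMIO write stubbed out as a printf:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for __raw_writeq() to the PHB invalidate register. */
    static void invalidate_write(uint64_t token)
    {
        printf("invalidate <- 0x%016llx\n", (unsigned long long)token);
    }

    /* p7ioc-style invalidation: two TCEs per write, so step by 16 bytes. */
    static void tce_invalidate_range(uint64_t start_pa, uint64_t end_pa)
    {
        uint64_t start = start_pa | (1ull << 63);
        uint64_t end   = end_pa   | (1ull << 63);
        uint64_t inc   = 16;

        end |= inc - 1;              /* round end up so it differs from start */
        while (start <= end) {
            invalidate_write(start);
            start += inc;
        }
    }

    int main(void)
    {
        tce_invalidate_range(0x1000, 0x1020);   /* emits three invalidate writes */
        return 0;
    }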
diff --git a/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 92b37a0186c9..7db8771a40f5 100644
--- a/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/trunk/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -26,7 +26,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -42,8 +41,8 @@
#ifdef CONFIG_PCI_MSI
static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int virq,
- unsigned int is_64, struct msi_msg *msg)
+ unsigned int hwirq, unsigned int is_64,
+ struct msi_msg *msg)
{
if (WARN_ON(!is_64))
return -ENXIO;
@@ -56,7 +55,7 @@ static int pnv_pci_p5ioc2_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
{
- unsigned int count;
+ unsigned int bmap_size;
const __be32 *prop = of_get_property(phb->hose->dn,
"ibm,opal-msi-ranges", NULL);
if (!prop)
@@ -68,8 +67,10 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
if (of_device_is_compatible(phb->hose->dn, "ibm,p5ioc2-pcix"))
return;
phb->msi_base = be32_to_cpup(prop);
- count = be32_to_cpup(prop + 1);
- if (msi_bitmap_alloc(&phb->msi_bmp, count, phb->hose->dn)) {
+ phb->msi_count = be32_to_cpup(prop + 1);
+ bmap_size = BITS_TO_LONGS(phb->msi_count) * sizeof(unsigned long);
+ phb->msi_map = zalloc_maybe_bootmem(bmap_size, GFP_KERNEL);
+ if (!phb->msi_map) {
pr_err("PCI %d: Failed to allocate MSI bitmap !\n",
phb->hose->global_number);
return;
@@ -77,7 +78,7 @@ static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb)
phb->msi_setup = pnv_pci_p5ioc2_msi_setup;
phb->msi32_support = 0;
pr_info(" Allocated bitmap for %d MSIs (base IRQ 0x%x)\n",
- count, phb->msi_base);
+ phb->msi_count, phb->msi_base);
}
#else
static void pnv_pci_init_p5ioc2_msis(struct pnv_phb *phb) { }
diff --git a/trunk/arch/powerpc/platforms/powernv/pci.c b/trunk/arch/powerpc/platforms/powernv/pci.c
index 55dfca844ddf..b8b8e0bd9897 100644
--- a/trunk/arch/powerpc/platforms/powernv/pci.c
+++ b/trunk/arch/powerpc/platforms/powernv/pci.c
@@ -26,7 +26,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -48,7 +47,43 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
- return (phb && phb->msi_bmp.bitmap) ? 0 : -ENODEV;
+ return (phb && phb->msi_map) ? 0 : -ENODEV;
+}
+
+static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
+{
+ unsigned long flags;
+ unsigned int id, rc;
+
+ spin_lock_irqsave(&phb->lock, flags);
+
+ id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
+ if (id >= phb->msi_count && phb->msi_next)
+ id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
+ if (id >= phb->msi_count) {
+ rc = 0;
+ goto out;
+ }
+ __set_bit(id, phb->msi_map);
+ rc = id + phb->msi_base;
+out:
+ spin_unlock_irqrestore(&phb->lock, flags);
+ return rc;
+}
+
+static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
+{
+ unsigned long flags;
+ unsigned int id;
+
+ if (WARN_ON(hwirq < phb->msi_base ||
+ hwirq >= (phb->msi_base + phb->msi_count)))
+ return;
+ id = hwirq - phb->msi_base;
+
+ spin_lock_irqsave(&phb->lock, flags);
+ __clear_bit(id, phb->msi_map);
+ spin_unlock_irqrestore(&phb->lock, flags);
}
static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -57,8 +92,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct pnv_phb *phb = hose->private_data;
struct msi_desc *entry;
struct msi_msg msg;
- int hwirq;
- unsigned int virq;
+ unsigned int hwirq, virq;
int rc;
if (WARN_ON(!phb))
@@ -70,25 +104,25 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
pci_name(pdev));
return -ENXIO;
}
- hwirq = msi_bitmap_alloc_hwirqs(&phb->msi_bmp, 1);
- if (hwirq < 0) {
+ hwirq = pnv_get_one_msi(phb);
+ if (!hwirq) {
pr_warn("%s: Failed to find a free MSI\n",
pci_name(pdev));
return -ENOSPC;
}
- virq = irq_create_mapping(NULL, phb->msi_base + hwirq);
+ virq = irq_create_mapping(NULL, hwirq);
if (virq == NO_IRQ) {
pr_warn("%s: Failed to map MSI to linux irq\n",
pci_name(pdev));
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
+ pnv_put_msi(phb, hwirq);
return -ENOMEM;
}
- rc = phb->msi_setup(phb, pdev, phb->msi_base + hwirq,
- virq, entry->msi_attrib.is_64, &msg);
+ rc = phb->msi_setup(phb, pdev, hwirq, entry->msi_attrib.is_64,
+ &msg);
if (rc) {
pr_warn("%s: Failed to setup MSI\n", pci_name(pdev));
irq_dispose_mapping(virq);
- msi_bitmap_free_hwirqs(&phb->msi_bmp, hwirq, 1);
+ pnv_put_msi(phb, hwirq);
return rc;
}
irq_set_msi_desc(virq, entry);
@@ -110,8 +144,7 @@ static void pnv_teardown_msi_irqs(struct pci_dev *pdev)
if (entry->irq == NO_IRQ)
continue;
irq_set_msi_desc(entry->irq, NULL);
- msi_bitmap_free_hwirqs(&phb->msi_bmp,
- virq_to_hw(entry->irq) - phb->msi_base, 1);
+ pnv_put_msi(phb, virq_to_hw(entry->irq));
irq_dispose_mapping(entry->irq);
}
}
@@ -329,6 +362,48 @@ struct pci_ops pnv_pci_ops = {
.write = pnv_pci_write_config,
};
+
+static void pnv_tce_invalidate(struct iommu_table *tbl,
+ u64 *startp, u64 *endp)
+{
+ u64 __iomem *invalidate = (u64 __iomem *)tbl->it_index;
+ unsigned long start, end, inc;
+
+ start = __pa(startp);
+ end = __pa(endp);
+
+
+ /* BML uses this case for p6/p7/galaxy2: Shift addr and put in node */
+ if (tbl->it_busno) {
+ start <<= 12;
+ end <<= 12;
+ inc = 128 << 12;
+ start |= tbl->it_busno;
+ end |= tbl->it_busno;
+ }
+ /* p7ioc-style invalidation, 2 TCEs per write */
+ else if (tbl->it_type & TCE_PCI_SWINV_PAIR) {
+ start |= (1ull << 63);
+ end |= (1ull << 63);
+ inc = 16;
+ }
+ /* Default (older HW) */
+ else
+ inc = 128;
+
+ end |= inc - 1; /* round up end to be different than start */
+
+ mb(); /* Ensure above stores are visible */
+ while (start <= end) {
+ __raw_writeq(start, invalidate);
+ start += inc;
+ }
+ /* The iommu layer will do another mb() for us on build() and
+ * we don't care on free()
+ */
+}
+
+
static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
unsigned long uaddr, enum dma_data_direction direction,
struct dma_attrs *attrs)
@@ -353,7 +428,7 @@ static int pnv_tce_build(struct iommu_table *tbl, long index, long npages,
* of flags if that becomes the case
*/
if (tbl->it_type & TCE_PCI_SWINV_CREATE)
- pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
+ pnv_tce_invalidate(tbl, tces, tcep - 1);
return 0;
}
@@ -367,8 +442,8 @@ static void pnv_tce_free(struct iommu_table *tbl, long index, long npages)
while (npages--)
*(tcep++) = 0;
- if (tbl->it_type & TCE_PCI_SWINV_CREATE)
- pnv_pci_ioda_tce_invalidate(tbl, tces, tcep - 1);
+ if (tbl->it_type & TCE_PCI_SWINV_FREE)
+ pnv_tce_invalidate(tbl, tces, tcep - 1);
}
static unsigned long pnv_tce_get(struct iommu_table *tbl, long index)
@@ -450,7 +525,7 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
pnv_pci_dma_fallback_setup(hose, pdev);
}
-/* Fixup wrong class code in p7ioc and p8 root complex */
+/* Fixup wrong class code in p7ioc root complex */
static void pnv_p7ioc_rc_quirk(struct pci_dev *dev)
{
dev->class = PCI_CLASS_BRIDGE_PCI << 8;
@@ -516,10 +591,6 @@ void __init pnv_pci_init(void)
if (!found_ioda)
for_each_compatible_node(np, NULL, "ibm,p5ioc2")
pnv_pci_init_p5ioc2_hub(np);
-
- /* Look for ioda2 built-in PHB3's */
- for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
- pnv_pci_init_ioda2_phb(np);
}
/* Setup the linkage between OF nodes and PHBs */
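
The pnv_get_one_msi()/pnv_put_msi() pair added above replaces the shared msi_bitmap helpers with a spinlock-protected bitmap plus a rotating search hint (msi_next). A stripped-down sketch of the same allocation policy follows, minus the locking and the rotating hint, with illustrative sizes:

    #include <stdio.h>

    #define MSI_COUNT 32
    #define MSI_BASE  0x100

    static unsigned char msi_map[MSI_COUNT];   /* 0 = free, 1 = in use */

    /* Return a hardware IRQ number, or 0 if the bitmap is exhausted. */
    static unsigned int get_one_msi(void)
    {
        unsigned int id;

        for (id = 0; id < MSI_COUNT; id++) {
            if (!msi_map[id]) {
                msi_map[id] = 1;
                return id + MSI_BASE;
            }
        }
        return 0;
    }

    static void put_msi(unsigned int hwirq)
    {
        if (hwirq < MSI_BASE || hwirq >= MSI_BASE + MSI_COUNT)
            return;
        msi_map[hwirq - MSI_BASE] = 0;
    }

    int main(void)
    {
        unsigned int a = get_one_msi(), b = get_one_msi();

        printf("allocated 0x%x and 0x%x\n", a, b);
        put_msi(a);
        printf("re-allocated 0x%x\n", get_one_msi());
        return 0;
    }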
diff --git a/trunk/arch/powerpc/platforms/powernv/pci.h b/trunk/arch/powerpc/platforms/powernv/pci.h
index 48dc4bb856a1..7cfb7c883deb 100644
--- a/trunk/arch/powerpc/platforms/powernv/pci.h
+++ b/trunk/arch/powerpc/platforms/powernv/pci.h
@@ -4,9 +4,9 @@
struct pci_dn;
enum pnv_phb_type {
- PNV_PHB_P5IOC2 = 0,
- PNV_PHB_IODA1 = 1,
- PNV_PHB_IODA2 = 2,
+ PNV_PHB_P5IOC2,
+ PNV_PHB_IODA1,
+ PNV_PHB_IODA2,
};
/* Precise PHB model for error management */
@@ -14,7 +14,6 @@ enum pnv_phb_model {
PNV_PHB_MODEL_UNKNOWN,
PNV_PHB_MODEL_P5IOC2,
PNV_PHB_MODEL_P7IOC,
- PNV_PHB_MODEL_PHB3,
};
#define PNV_PCI_DIAG_BUF_SIZE 4096
@@ -23,10 +22,8 @@ enum pnv_phb_model {
#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
/* Data associated with a PE, including IOMMU tracking etc.. */
-struct pnv_phb;
struct pnv_ioda_pe {
unsigned long flags;
- struct pnv_phb *phb;
/* A PE can be associated with a single device or an
* entire bus (& children). In the former case, pdev
@@ -76,13 +73,15 @@ struct pnv_phb {
spinlock_t lock;
#ifdef CONFIG_PCI_MSI
+ unsigned long *msi_map;
unsigned int msi_base;
+ unsigned int msi_count;
+ unsigned int msi_next;
unsigned int msi32_support;
- struct msi_bitmap msi_bmp;
#endif
int (*msi_setup)(struct pnv_phb *phb, struct pci_dev *dev,
- unsigned int hwirq, unsigned int virq,
- unsigned int is_64, struct msi_msg *msg);
+ unsigned int hwirq, unsigned int is_64,
+ struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
@@ -110,10 +109,6 @@ struct pnv_phb {
unsigned int *io_segmap;
struct pnv_ioda_pe *pe_array;
- /* IRQ chip */
- int irq_chip_init;
- struct irq_chip irq_chip;
-
/* Sorted list of used PE's based
* on the sequence of creation
*/
@@ -155,7 +150,6 @@ extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
u64 dma_offset);
extern void pnv_pci_init_p5ioc2_hub(struct device_node *np);
extern void pnv_pci_init_ioda_hub(struct device_node *np);
-extern void pnv_pci_init_ioda2_phb(struct device_node *np);
-extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
- u64 *startp, u64 *endp);
+
+
#endif /* __POWERNV_PCI_H */
diff --git a/trunk/arch/powerpc/platforms/prep/Kconfig b/trunk/arch/powerpc/platforms/prep/Kconfig
new file mode 100644
index 000000000000..1547f66235d9
--- /dev/null
+++ b/trunk/arch/powerpc/platforms/prep/Kconfig
@@ -0,0 +1,23 @@
+config PPC_PREP
+ bool "PowerPC Reference Platform (PReP) based machines"
+ depends on 6xx && BROKEN
+ select HAVE_PCSPKR_PLATFORM
+ select MPIC
+ select PPC_I8259
+ select PPC_INDIRECT_PCI
+ select PPC_UDBG_16550
+ select PPC_NATIVE
+ default n
+
+config PREP_RESIDUAL
+ bool "Support for PReP Residual Data"
+ depends on PPC_PREP
+ help
+ Some PReP systems have residual data passed to the kernel by the
+ firmware. This allows detection of memory size, devices present and
+ other useful pieces of information. Sometimes this information is
+ not present or incorrect, in which case it could lead to the machine
+ behaving incorrectly. If this happens, either disable PREP_RESIDUAL
+ or pass the 'noresidual' option to the kernel.
+
+ If you are running a PReP system, say Y here, otherwise say N.
diff --git a/trunk/arch/powerpc/platforms/ps3/time.c b/trunk/arch/powerpc/platforms/ps3/time.c
index cba1e6be68e5..40b5cb433005 100644
--- a/trunk/arch/powerpc/platforms/ps3/time.c
+++ b/trunk/arch/powerpc/platforms/ps3/time.c
@@ -89,8 +89,10 @@ static int __init ps3_rtc_init(void)
return -ENODEV;
pdev = platform_device_register_simple("rtc-ps3", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
- return PTR_RET(pdev);
+ return 0;
}
module_init(ps3_rtc_init);
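
This hunk, like the rtc_cmos_setup.c hunk further down, spells out PTR_RET(pdev) as an explicit IS_ERR()/PTR_ERR() check; for a pointer that is either valid or an ERR_PTR-encoded errno, the two forms behave the same. A minimal user-space sketch of the pattern, with stand-ins for the kernel helpers and for platform_device_register_simple():

    #include <stdio.h>
    #include <errno.h>

    /* Minimal stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers. */
    #define MAX_ERRNO 4095
    static void *err_ptr(long error)    { return (void *)error; }
    static int is_err(const void *ptr)  { return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO; }
    static long ptr_err(const void *ptr) { return (long)ptr; }

    /* Stand-in for platform_device_register_simple(): fails with -ENODEV here. */
    static void *register_device(void)
    {
        return err_ptr(-ENODEV);
    }

    static int rtc_init(void)
    {
        void *pdev = register_device();

        if (is_err(pdev))
            return ptr_err(pdev);
        return 0;
    }

    int main(void)
    {
        printf("rtc_init() = %d\n", rtc_init());   /* prints -19 (ENODEV) */
        return 0;
    }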
diff --git a/trunk/arch/powerpc/platforms/pseries/firmware.c b/trunk/arch/powerpc/platforms/pseries/firmware.c
index 8c80588abacc..aa3693f7fb27 100644
--- a/trunk/arch/powerpc/platforms/pseries/firmware.c
+++ b/trunk/arch/powerpc/platforms/pseries/firmware.c
@@ -28,18 +28,18 @@
#include "pseries.h"
-struct hypertas_fw_feature {
+typedef struct {
unsigned long val;
char * name;
-};
+} firmware_feature_t;
/*
* The names in this table match names in rtas/ibm,hypertas-functions. If the
* entry ends in a '*', only up to the '*' is matched. Otherwise the entire
* string must match.
*/
-static __initdata struct hypertas_fw_feature
-hypertas_fw_features_table[] = {
+static __initdata firmware_feature_t
+firmware_features_table[FIRMWARE_MAX_FEATURES] = {
{FW_FEATURE_PFT, "hcall-pft"},
{FW_FEATURE_TCE, "hcall-tce"},
{FW_FEATURE_SPRG0, "hcall-sprg0"},
@@ -69,18 +69,20 @@ hypertas_fw_features_table[] = {
* device-tree/ibm,hypertas-functions. Ultimately this functionality may
* be moved into prom.c prom_init().
*/
-void __init fw_hypertas_feature_init(const char *hypertas, unsigned long len)
+void __init fw_feature_init(const char *hypertas, unsigned long len)
{
const char *s;
int i;
- pr_debug(" -> fw_hypertas_feature_init()\n");
+ pr_debug(" -> fw_feature_init()\n");
for (s = hypertas; s < hypertas + len; s += strlen(s) + 1) {
- for (i = 0; i < ARRAY_SIZE(hypertas_fw_features_table); i++) {
- const char *name = hypertas_fw_features_table[i].name;
+ for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
+ const char *name = firmware_features_table[i].name;
size_t size;
-
+ /* check value against table of strings */
+ if (!name)
+ continue;
/*
* If there is a '*' at the end of name, only check
* up to there
@@ -94,40 +96,10 @@ void __init fw_hypertas_feature_init(const char *hypertas, unsigned long len)
/* we have a match */
powerpc_firmware_features |=
- hypertas_fw_features_table[i].val;
+ firmware_features_table[i].val;
break;
}
}
- pr_debug(" <- fw_hypertas_feature_init()\n");
-}
-
-struct vec5_fw_feature {
- unsigned long val;
- unsigned int feature;
-};
-
-static __initdata struct vec5_fw_feature
-vec5_fw_features_table[] = {
- {FW_FEATURE_TYPE1_AFFINITY, OV5_TYPE1_AFFINITY},
- {FW_FEATURE_PRRN, OV5_PRRN},
-};
-
-void __init fw_vec5_feature_init(const char *vec5, unsigned long len)
-{
- unsigned int index, feat;
- int i;
-
- pr_debug(" -> fw_vec5_feature_init()\n");
-
- for (i = 0; i < ARRAY_SIZE(vec5_fw_features_table); i++) {
- index = OV5_INDX(vec5_fw_features_table[i].feature);
- feat = OV5_FEAT(vec5_fw_features_table[i].feature);
-
- if (vec5[index] & feat)
- powerpc_firmware_features |=
- vec5_fw_features_table[i].val;
- }
-
- pr_debug(" <- fw_vec5_feature_init()\n");
+ pr_debug(" <- fw_feature_init()\n");
}
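
The matching rule spelled out in the comment above -- a table name ending in '*' is a prefix match, anything else must match exactly -- is easy to show in isolation. A small sketch with hypothetical table entries (the names below are illustrative, not taken from the real ibm,hypertas-functions list):

    #include <stdio.h>
    #include <string.h>

    /* Match 'name' against a table entry: if the entry ends in '*', compare
     * only up to the '*'; otherwise the whole string must match. */
    static int feature_matches(const char *entry, const char *name)
    {
        size_t size = strlen(entry);

        if (size && entry[size - 1] == '*')
            return strncmp(entry, name, size - 1) == 0;
        return strcmp(entry, name) == 0;
    }

    int main(void)
    {
        printf("%d\n", feature_matches("hcall-pft", "hcall-pft"));          /* 1: exact */
        printf("%d\n", feature_matches("hcall-example*", "hcall-example"));  /* 1: prefix */
        printf("%d\n", feature_matches("hcall-pft", "hcall-tce"));           /* 0: no match */
        return 0;
    }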
diff --git a/trunk/arch/powerpc/platforms/pseries/iommu.c b/trunk/arch/powerpc/platforms/pseries/iommu.c
index 86ae364900d6..1b2a174e7c59 100644
--- a/trunk/arch/powerpc/platforms/pseries/iommu.c
+++ b/trunk/arch/powerpc/platforms/pseries/iommu.c
@@ -924,13 +924,6 @@ static void restore_default_window(struct pci_dev *dev,
__restore_default_window(pci_dev_to_eeh_dev(dev), ddw_restore_token);
}
-struct failed_ddw_pdn {
- struct device_node *pdn;
- struct list_head list;
-};
-
-static LIST_HEAD(failed_ddw_pdn_list);
-
/*
* If the PE supports dynamic dma windows, and there is space for a table
* that can map all pages in a linear offset, then setup such a table,
@@ -958,7 +951,6 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
struct dynamic_dma_window_prop *ddwprop;
const void *dma_window = NULL;
unsigned long liobn, offset, size;
- struct failed_ddw_pdn *fpdn;
mutex_lock(&direct_window_init_mutex);
@@ -966,18 +958,6 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (dma_addr != 0)
goto out_unlock;
- /*
- * If we already went through this for a previous function of
- * the same device and failed, we don't want to muck with the
- * DMA window again, as it will race with in-flight operations
- * and can lead to EEHs. The above mutex protects access to the
- * list.
- */
- list_for_each_entry(fpdn, &failed_ddw_pdn_list, list) {
- if (!strcmp(fpdn->pdn->full_name, pdn->full_name))
- goto out_unlock;
- }
-
/*
* the ibm,ddw-applicable property holds the tokens for:
* ibm,query-pe-dma-window
@@ -1134,12 +1114,6 @@ static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn)
if (ddw_restore_token)
restore_default_window(dev, ddw_restore_token);
- fpdn = kzalloc(sizeof(*fpdn), GFP_KERNEL);
- if (!fpdn)
- goto out_unlock;
- fpdn->pdn = pdn;
- list_add(&fpdn->list, &failed_ddw_pdn_list);
-
out_unlock:
mutex_unlock(&direct_window_init_mutex);
return dma_addr;
diff --git a/trunk/arch/powerpc/platforms/pseries/lpar.c b/trunk/arch/powerpc/platforms/pseries/lpar.c
index 299731e9036b..0da39fed355a 100644
--- a/trunk/arch/powerpc/platforms/pseries/lpar.c
+++ b/trunk/arch/powerpc/platforms/pseries/lpar.c
@@ -186,13 +186,7 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group)
(0x1UL << 4), &dummy1, &dummy2);
if (lpar_rc == H_SUCCESS)
return i;
-
- /*
- * The test for adjunct partition is performed before the
- * ANDCOND test. H_RESOURCE may be returned, so we need to
- * check for that as well.
- */
- BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE);
+ BUG_ON(lpar_rc != H_NOT_FOUND);
slot_offset++;
slot_offset &= 0x7;
diff --git a/trunk/arch/powerpc/platforms/pseries/mobility.c b/trunk/arch/powerpc/platforms/pseries/mobility.c
index 3d01eee9ffb1..6573808cc5f3 100644
--- a/trunk/arch/powerpc/platforms/pseries/mobility.c
+++ b/trunk/arch/powerpc/platforms/pseries/mobility.c
@@ -37,16 +37,14 @@ struct update_props_workarea {
#define UPDATE_DT_NODE 0x02000000
#define ADD_DT_NODE 0x03000000
-#define MIGRATION_SCOPE (1)
-
-static int mobility_rtas_call(int token, char *buf, s32 scope)
+static int mobility_rtas_call(int token, char *buf)
{
int rc;
spin_lock(&rtas_data_buf_lock);
memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE);
- rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, scope);
+ rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, 1);
memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE);
spin_unlock(&rtas_data_buf_lock);
@@ -125,7 +123,7 @@ static int update_dt_property(struct device_node *dn, struct property **prop,
return 0;
}
-static int update_dt_node(u32 phandle, s32 scope)
+static int update_dt_node(u32 phandle)
{
struct update_props_workarea *upwa;
struct device_node *dn;
@@ -134,7 +132,6 @@ static int update_dt_node(u32 phandle, s32 scope)
char *prop_data;
char *rtas_buf;
int update_properties_token;
- u32 vd;
update_properties_token = rtas_token("ibm,update-properties");
if (update_properties_token == RTAS_UNKNOWN_SERVICE)
@@ -154,31 +151,19 @@ static int update_dt_node(u32 phandle, s32 scope)
upwa->phandle = phandle;
do {
- rc = mobility_rtas_call(update_properties_token, rtas_buf,
- scope);
+ rc = mobility_rtas_call(update_properties_token, rtas_buf);
if (rc < 0)
break;
prop_data = rtas_buf + sizeof(*upwa);
- /* The first element of the buffer is the path of the node
- * being updated in the form of an 8 byte string length
- * followed by the string. Skip past this to get to the
- * properties being updated.
- */
- vd = *prop_data++;
- prop_data += vd;
-
- /* The path we skipped over is counted as one of the elements
- * returned so start counting at one.
- */
- for (i = 1; i < upwa->nprops; i++) {
+ for (i = 0; i < upwa->nprops; i++) {
char *prop_name;
+ u32 vd;
- prop_name = prop_data;
+ prop_name = prop_data + 1;
prop_data += strlen(prop_name) + 1;
- vd = *(u32 *)prop_data;
- prop_data += sizeof(vd);
+ vd = *prop_data++;
switch (vd) {
case 0x00000000:
@@ -234,7 +219,7 @@ static int add_dt_node(u32 parent_phandle, u32 drc_index)
return rc;
}
-int pseries_devicetree_update(s32 scope)
+static int pseries_devicetree_update(void)
{
char *rtas_buf;
u32 *data;
@@ -250,7 +235,7 @@ int pseries_devicetree_update(s32 scope)
return -ENOMEM;
do {
- rc = mobility_rtas_call(update_nodes_token, rtas_buf, scope);
+ rc = mobility_rtas_call(update_nodes_token, rtas_buf);
if (rc && rc != 1)
break;
@@ -271,7 +256,7 @@ int pseries_devicetree_update(s32 scope)
delete_dt_node(phandle);
break;
case UPDATE_DT_NODE:
- update_dt_node(phandle, scope);
+ update_dt_node(phandle);
break;
case ADD_DT_NODE:
drc_index = *data++;
@@ -291,7 +276,7 @@ void post_mobility_fixup(void)
int rc;
int activate_fw_token;
- rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ rc = pseries_devicetree_update();
if (rc) {
printk(KERN_ERR "Initial post-mobility device tree update "
"failed: %d\n", rc);
@@ -307,7 +292,7 @@ void post_mobility_fixup(void)
rc = rtas_call(activate_fw_token, 0, 1, NULL);
if (!rc) {
- rc = pseries_devicetree_update(MIGRATION_SCOPE);
+ rc = pseries_devicetree_update();
if (rc)
printk(KERN_ERR "Secondary post-mobility device tree "
"update failed: %d\n", rc);
diff --git a/trunk/arch/powerpc/platforms/pseries/plpar_wrappers.h b/trunk/arch/powerpc/platforms/pseries/plpar_wrappers.h
index f35787b6a5e0..f368668d97b3 100644
--- a/trunk/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/trunk/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -58,39 +58,40 @@ static inline long extended_cede_processor(unsigned long latency_hint)
static inline long vpa_call(unsigned long flags, unsigned long cpu,
unsigned long vpa)
{
- flags = flags << H_VPA_FUNC_SHIFT;
+ /* flags are in bits 16-18 (counting from most significant bit) */
+ flags = flags << (63 - 18);
return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}
static inline long unregister_vpa(unsigned long cpu)
{
- return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
+ return vpa_call(0x5, cpu, 0);
}
static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(H_VPA_REG_VPA, cpu, vpa);
+ return vpa_call(0x1, cpu, vpa);
}
static inline long unregister_slb_shadow(unsigned long cpu)
{
- return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
+ return vpa_call(0x7, cpu, 0);
}
static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(H_VPA_REG_SLB, cpu, vpa);
+ return vpa_call(0x3, cpu, vpa);
}
static inline long unregister_dtl(unsigned long cpu)
{
- return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
+ return vpa_call(0x6, cpu, 0);
}
static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
- return vpa_call(H_VPA_REG_DTL, cpu, vpa);
+ return vpa_call(0x2, cpu, vpa);
}
static inline long plpar_page_set_loaned(unsigned long vpa)
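
Both sides of the vpa_call() hunk place the H_REGISTER_VPA subfunction code in bits 16-18 of the flags argument, counted IBM-style from the most significant bit, which is a left shift by 63 - 18 = 45 in conventional LSB numbering. A tiny sketch of that arithmetic, assuming the subfunction codes visible in the diff (0x1 register VPA, 0x6 deregister DTL):

    #include <stdio.h>

    /* Bits 16-18 from the MSB of a 64-bit word start at LSB position 63 - 18 = 45. */
    #define VPA_FUNC_SHIFT (63 - 18)

    int main(void)
    {
        unsigned long reg_vpa   = 0x1UL << VPA_FUNC_SHIFT;   /* register VPA   */
        unsigned long dereg_dtl = 0x6UL << VPA_FUNC_SHIFT;   /* deregister DTL */

        printf("register VPA flags:   0x%016lx\n", reg_vpa);
        printf("deregister DTL flags: 0x%016lx\n", dereg_dtl);
        return 0;
    }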
diff --git a/trunk/arch/powerpc/platforms/pseries/pseries.h b/trunk/arch/powerpc/platforms/pseries/pseries.h
index 8af71e4cc17f..9a3dda07566f 100644
--- a/trunk/arch/powerpc/platforms/pseries/pseries.h
+++ b/trunk/arch/powerpc/platforms/pseries/pseries.h
@@ -19,10 +19,7 @@ extern void request_event_sources_irqs(struct device_node *np,
#include
-extern void __init fw_hypertas_feature_init(const char *hypertas,
- unsigned long len);
-extern void __init fw_vec5_feature_init(const char *hypertas,
- unsigned long len);
+extern void __init fw_feature_init(const char *hypertas, unsigned long len);
struct pt_regs;
diff --git a/trunk/arch/powerpc/platforms/pseries/setup.c b/trunk/arch/powerpc/platforms/pseries/setup.c
index ac932a9eb440..8bcc9ca6682f 100644
--- a/trunk/arch/powerpc/platforms/pseries/setup.c
+++ b/trunk/arch/powerpc/platforms/pseries/setup.c
@@ -628,39 +628,25 @@ static void __init pSeries_init_early(void)
* Called very early, MMU is off, device-tree isn't unflattened
*/
-static int __init pseries_probe_fw_features(unsigned long node,
- const char *uname, int depth,
- void *data)
+static int __init pSeries_probe_hypertas(unsigned long node,
+ const char *uname, int depth,
+ void *data)
{
- const char *prop;
+ const char *hypertas;
unsigned long len;
- static int hypertas_found;
- static int vec5_found;
- if (depth != 1)
+ if (depth != 1 ||
+ (strcmp(uname, "rtas") != 0 && strcmp(uname, "rtas@0") != 0))
return 0;
- if (!strcmp(uname, "rtas") || !strcmp(uname, "rtas@0")) {
- prop = of_get_flat_dt_prop(node, "ibm,hypertas-functions",
- &len);
- if (prop) {
- powerpc_firmware_features |= FW_FEATURE_LPAR;
- fw_hypertas_feature_init(prop, len);
- }
-
- hypertas_found = 1;
- }
+ hypertas = of_get_flat_dt_prop(node, "ibm,hypertas-functions", &len);
+ if (!hypertas)
+ return 1;
- if (!strcmp(uname, "chosen")) {
- prop = of_get_flat_dt_prop(node, "ibm,architecture-vec-5",
- &len);
- if (prop)
- fw_vec5_feature_init(prop, len);
+ powerpc_firmware_features |= FW_FEATURE_LPAR;
+ fw_feature_init(hypertas, len);
- vec5_found = 1;
- }
-
- return hypertas_found && vec5_found;
+ return 1;
}
static int __init pSeries_probe(void)
@@ -683,7 +669,7 @@ static int __init pSeries_probe(void)
pr_debug("pSeries detected, looking for LPAR capability...\n");
/* Now try to figure out if we are running on LPAR */
- of_scan_flat_dt(pseries_probe_fw_features, NULL);
+ of_scan_flat_dt(pSeries_probe_hypertas, NULL);
if (firmware_has_feature(FW_FEATURE_LPAR))
hpte_init_lpar();
diff --git a/trunk/arch/powerpc/platforms/wsp/Kconfig b/trunk/arch/powerpc/platforms/wsp/Kconfig
index 422a175b10ee..79d2225b7608 100644
--- a/trunk/arch/powerpc/platforms/wsp/Kconfig
+++ b/trunk/arch/powerpc/platforms/wsp/Kconfig
@@ -9,6 +9,7 @@ config PPC_WSP
select PCI
select PPC_IO_WORKAROUNDS if PCI
select PPC_INDIRECT_PIO if PCI
+ select PPC_WSP_COPRO
default n
menu "WSP platform selection"
@@ -28,3 +29,7 @@ config PPC_CHROMA
default y
endmenu
+
+config PPC_A2_DD2
+ bool "Support for DD2 based A2/WSP systems"
+ depends on PPC_A2
diff --git a/trunk/arch/powerpc/sysdev/Kconfig b/trunk/arch/powerpc/sysdev/Kconfig
index ab4cb5476472..a84fecf63c4d 100644
--- a/trunk/arch/powerpc/sysdev/Kconfig
+++ b/trunk/arch/powerpc/sysdev/Kconfig
@@ -19,7 +19,6 @@ config PPC_MSI_BITMAP
default y if MPIC
default y if FSL_PCI
default y if PPC4xx_MSI
- default y if POWERNV_MSI
source "arch/powerpc/sysdev/xics/Kconfig"
diff --git a/trunk/arch/powerpc/sysdev/rtc_cmos_setup.c b/trunk/arch/powerpc/sysdev/rtc_cmos_setup.c
index af79e1ea74b6..9afba924e94f 100644
--- a/trunk/arch/powerpc/sysdev/rtc_cmos_setup.c
+++ b/trunk/arch/powerpc/sysdev/rtc_cmos_setup.c
@@ -62,7 +62,10 @@ static int __init add_rtc(void)
pd = platform_device_register_simple("rtc_cmos", -1,
&res[0], num_res);
- return PTR_RET(pd);
+ if (IS_ERR(pd))
+ return PTR_ERR(pd);
+
+ return 0;
}
fs_initcall(add_rtc);
diff --git a/trunk/arch/powerpc/sysdev/xics/icp-native.c b/trunk/arch/powerpc/sysdev/xics/icp-native.c
index 89db29d17c25..48861d3fcd07 100644
--- a/trunk/arch/powerpc/sysdev/xics/icp-native.c
+++ b/trunk/arch/powerpc/sysdev/xics/icp-native.c
@@ -81,7 +81,7 @@ static void icp_native_set_cpu_priority(unsigned char cppr)
iosync();
}
-void icp_native_eoi(struct irq_data *d)
+static void icp_native_eoi(struct irq_data *d)
{
unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
diff --git a/trunk/arch/s390/include/asm/io.h b/trunk/arch/s390/include/asm/io.h
index 379d96e2105e..27cb32185ce1 100644
--- a/trunk/arch/s390/include/asm/io.h
+++ b/trunk/arch/s390/include/asm/io.h
@@ -50,6 +50,10 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
#define ioremap_nocache(addr, size) ioremap(addr, size)
#define ioremap_wc ioremap_nocache
+/* TODO: s390 cannot support io_remap_pfn_range... */
+#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
+ remap_pfn_range(vma, vaddr, pfn, size, prot)
+
static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
{
return (void __iomem *) offset;
diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h
index 3cb47cf02530..4a2930844d43 100644
--- a/trunk/arch/s390/include/asm/pgtable.h
+++ b/trunk/arch/s390/include/asm/pgtable.h
@@ -57,10 +57,6 @@ extern unsigned long zero_page_mask;
(((unsigned long)(vaddr)) &zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE
-/* TODO: s390 cannot support io_remap_pfn_range... */
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
- remap_pfn_range(vma, vaddr, pfn, size, prot)
-
#endif /* !__ASSEMBLY__ */
/*
@@ -348,7 +344,6 @@ extern unsigned long MODULES_END;
#define _REGION3_ENTRY_CO 0x100 /* change-recording override */
/* Bits in the segment table entry */
-#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
@@ -1536,8 +1531,7 @@ extern int s390_enable_sie(void);
/*
* No page table caches to initialise
*/
-static inline void pgtable_cache_init(void) { }
-static inline void check_pgt_cache(void) { }
+#define pgtable_cache_init() do { } while (0)
#include <asm-generic/pgtable.h>
diff --git a/trunk/arch/s390/lib/uaccess_pt.c b/trunk/arch/s390/lib/uaccess_pt.c
index 466fb3383960..dff631d34b45 100644
--- a/trunk/arch/s390/lib/uaccess_pt.c
+++ b/trunk/arch/s390/lib/uaccess_pt.c
@@ -77,68 +77,41 @@ static size_t copy_in_kernel(size_t count, void __user *to,
* >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
* contains the (negative) exception code.
*/
-#ifdef CONFIG_64BIT
-static unsigned long follow_table(struct mm_struct *mm,
- unsigned long address, int write)
+static __always_inline unsigned long follow_table(struct mm_struct *mm,
+ unsigned long addr, int write)
{
- unsigned long *table = (unsigned long *)__pa(mm->pgd);
-
- switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
- case _ASCE_TYPE_REGION1:
- table = table + ((address >> 53) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
- return -0x39UL;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- case _ASCE_TYPE_REGION2:
- table = table + ((address >> 42) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
- return -0x3aUL;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- case _ASCE_TYPE_REGION3:
- table = table + ((address >> 31) & 0x7ff);
- if (unlikely(*table & _REGION_ENTRY_INV))
- return -0x3bUL;
- table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
- case _ASCE_TYPE_SEGMENT:
- table = table + ((address >> 20) & 0x7ff);
- if (unlikely(*table & _SEGMENT_ENTRY_INV))
- return -0x10UL;
- if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
- if (write && (*table & _SEGMENT_ENTRY_RO))
- return -0x04UL;
- return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
- (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
- }
- table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
- }
- table = table + ((address >> 12) & 0xff);
- if (unlikely(*table & _PAGE_INVALID))
- return -0x11UL;
- if (write && (*table & _PAGE_RO))
- return -0x04UL;
- return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
-}
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *ptep;
-#else /* CONFIG_64BIT */
+ pgd = pgd_offset(mm, addr);
+ if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
+ return -0x3aUL;
-static unsigned long follow_table(struct mm_struct *mm,
- unsigned long address, int write)
-{
- unsigned long *table = (unsigned long *)__pa(mm->pgd);
+ pud = pud_offset(pgd, addr);
+ if (pud_none(*pud) || unlikely(pud_bad(*pud)))
+ return -0x3bUL;
- table = table + ((address >> 20) & 0x7ff);
- if (unlikely(*table & _SEGMENT_ENTRY_INV))
+ pmd = pmd_offset(pud, addr);
+ if (pmd_none(*pmd))
return -0x10UL;
- table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
- table = table + ((address >> 12) & 0xff);
- if (unlikely(*table & _PAGE_INVALID))
+ if (pmd_large(*pmd)) {
+ if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
+ return -0x04UL;
+ return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
+ }
+ if (unlikely(pmd_bad(*pmd)))
+ return -0x10UL;
+
+ ptep = pte_offset_map(pmd, addr);
+ if (!pte_present(*ptep))
return -0x11UL;
- if (write && (*table & _PAGE_RO))
+ if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
return -0x04UL;
- return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
-}
-#endif /* CONFIG_64BIT */
+ return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+}
static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
size_t n, int write_user)
@@ -224,7 +197,7 @@ size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
static size_t clear_user_pt(size_t n, void __user *to)
{
- void *zpage = (void *) empty_zero_page;
+ void *zpage = &empty_zero_page;
long done, size, ret;
done = 0;
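
Note: the replacement follow_table() above walks the generic pgd/pud/pmd/pte levels instead of decoding s390 region and segment tables by hand. Below is a minimal sketch of that walk using only the generic accessors; it is illustrative only, not part of the patch, and the hypothetical example_ helper drops the distinct error codes and the large-page case.

static unsigned long example_user_va_to_pa(struct mm_struct *mm,
					   unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
		return 0;
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud) || unlikely(pud_bad(*pud)))
		return 0;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
		return 0;
	pte = pte_offset_map(pmd, addr);
	if (!pte_present(*pte))
		return 0;
	/* physical frame plus the offset within the page */
	return (pte_val(*pte) & PAGE_MASK) + (addr & ~PAGE_MASK);
}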
diff --git a/trunk/arch/sh/mm/Kconfig b/trunk/arch/sh/mm/Kconfig
index dba285e86808..5a43a871e097 100644
--- a/trunk/arch/sh/mm/Kconfig
+++ b/trunk/arch/sh/mm/Kconfig
@@ -137,6 +137,13 @@ config ARCH_SPARSEMEM_ENABLE
config ARCH_SPARSEMEM_DEFAULT
def_bool y
+config MAX_ACTIVE_REGIONS
+ int
+ default "6" if (CPU_SUBTYPE_SHX3 && SPARSEMEM)
+ default "2" if SPARSEMEM && (CPU_SUBTYPE_SH7722 || \
+ CPU_SUBTYPE_SH7785)
+ default "1"
+
config ARCH_SELECT_MEMORY_MODEL
def_bool y
diff --git a/trunk/arch/sparc/include/asm/Kbuild b/trunk/arch/sparc/include/asm/Kbuild
index ff18e3cfb6b1..e26d430ce2fd 100644
--- a/trunk/arch/sparc/include/asm/Kbuild
+++ b/trunk/arch/sparc/include/asm/Kbuild
@@ -2,16 +2,11 @@
generic-y += clkdev.h
-generic-y += cputime.h
generic-y += div64.h
-generic-y += emergency-restart.h
generic-y += exec.h
generic-y += local64.h
-generic-y += mutex.h
generic-y += irq_regs.h
generic-y += local.h
generic-y += module.h
-generic-y += serial.h
generic-y += trace_clock.h
-generic-y += types.h
generic-y += word-at-a-time.h
diff --git a/trunk/arch/sparc/include/asm/cputime.h b/trunk/arch/sparc/include/asm/cputime.h
new file mode 100644
index 000000000000..1a642b81e019
--- /dev/null
+++ b/trunk/arch/sparc/include/asm/cputime.h
@@ -0,0 +1,6 @@
+#ifndef __SPARC_CPUTIME_H
+#define __SPARC_CPUTIME_H
+
+#include <asm-generic/cputime.h>
+
+#endif /* __SPARC_CPUTIME_H */
diff --git a/trunk/arch/sparc/include/asm/emergency-restart.h b/trunk/arch/sparc/include/asm/emergency-restart.h
new file mode 100644
index 000000000000..108d8c48e42e
--- /dev/null
+++ b/trunk/arch/sparc/include/asm/emergency-restart.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_EMERGENCY_RESTART_H
+#define _ASM_EMERGENCY_RESTART_H
+
+#include <asm-generic/emergency-restart.h>
+
+#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/trunk/arch/sparc/include/asm/mutex.h b/trunk/arch/sparc/include/asm/mutex.h
new file mode 100644
index 000000000000..458c1f7fbc18
--- /dev/null
+++ b/trunk/arch/sparc/include/asm/mutex.h
@@ -0,0 +1,9 @@
+/*
+ * Pull in the generic implementation for the mutex fastpath.
+ *
+ * TODO: implement optimized primitives instead, or leave the generic
+ * implementation in place, or pick the atomic_xchg() based generic
+ * implementation. (see asm-generic/mutex-xchg.h for details)
+ */
+
+#include <asm-generic/mutex-dec.h>
diff --git a/trunk/arch/sparc/include/asm/pgtable_64.h b/trunk/arch/sparc/include/asm/pgtable_64.h
index 7619f2f792af..08fcce90316b 100644
--- a/trunk/arch/sparc/include/asm/pgtable_64.h
+++ b/trunk/arch/sparc/include/asm/pgtable_64.h
@@ -915,7 +915,6 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
-#include
#include
/* We provide our own get_unmapped_area to cope with VA holes and
diff --git a/trunk/arch/sparc/include/asm/serial.h b/trunk/arch/sparc/include/asm/serial.h
new file mode 100644
index 000000000000..f90d61c28059
--- /dev/null
+++ b/trunk/arch/sparc/include/asm/serial.h
@@ -0,0 +1,6 @@
+#ifndef __SPARC_SERIAL_H
+#define __SPARC_SERIAL_H
+
+#define BASE_BAUD ( 1843200 / 16 )
+
+#endif /* __SPARC_SERIAL_H */
diff --git a/trunk/arch/sparc/include/asm/smp_32.h b/trunk/arch/sparc/include/asm/smp_32.h
index 3c8917f054de..b73da3c5f10a 100644
--- a/trunk/arch/sparc/include/asm/smp_32.h
+++ b/trunk/arch/sparc/include/asm/smp_32.h
@@ -36,6 +36,7 @@ typedef void (*smpfunc_t)(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);
void cpu_panic(void);
+extern void smp4m_irq_rotate(int cpu);
/*
* General functions that each host system must provide.
@@ -45,6 +46,7 @@ void sun4m_init_smp(void);
void sun4d_init_smp(void);
void smp_callin(void);
+void smp_boot_cpus(void);
void smp_store_cpu_info(int);
void smp_resched_interrupt(void);
@@ -105,6 +107,9 @@ extern int hard_smp_processor_id(void);
#define raw_smp_processor_id() (current_thread_info()->cpu)
+#define prof_multiplier(__cpu) cpu_data(__cpu).multiplier
+#define prof_counter(__cpu) cpu_data(__cpu).counter
+
void smp_setup_cpu_possible_map(void);
#endif /* !(__ASSEMBLY__) */
diff --git a/trunk/arch/sparc/include/asm/switch_to_64.h b/trunk/arch/sparc/include/asm/switch_to_64.h
index c7de3323819c..cad36f56fa03 100644
--- a/trunk/arch/sparc/include/asm/switch_to_64.h
+++ b/trunk/arch/sparc/include/asm/switch_to_64.h
@@ -18,7 +18,8 @@ do { \
* and 2 stores in this critical code path. -DaveM
*/
#define switch_to(prev, next, last) \
-do { save_and_clear_fpu(); \
+do { flush_tlb_pending(); \
+ save_and_clear_fpu(); \
/* If you are tempted to conditionalize the following */ \
/* so that ASI is only written if it changes, think again. */ \
__asm__ __volatile__("wr %%g0, %0, %%asi" \
diff --git a/trunk/arch/sparc/include/asm/tlbflush_64.h b/trunk/arch/sparc/include/asm/tlbflush_64.h
index f0d6a9700f4c..2ef463494153 100644
--- a/trunk/arch/sparc/include/asm/tlbflush_64.h
+++ b/trunk/arch/sparc/include/asm/tlbflush_64.h
@@ -11,40 +11,24 @@
struct tlb_batch {
struct mm_struct *mm;
unsigned long tlb_nr;
- unsigned long active;
unsigned long vaddrs[TLB_BATCH_NR];
};
extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
extern void flush_tsb_user(struct tlb_batch *tb);
-extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
/* TLB flush operations. */
-static inline void flush_tlb_mm(struct mm_struct *mm)
-{
-}
-
-static inline void flush_tlb_page(struct vm_area_struct *vma,
- unsigned long vmaddr)
-{
-}
-
-static inline void flush_tlb_range(struct vm_area_struct *vma,
- unsigned long start, unsigned long end)
-{
-}
-
-#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-
extern void flush_tlb_pending(void);
-extern void arch_enter_lazy_mmu_mode(void);
-extern void arch_leave_lazy_mmu_mode(void);
-#define arch_flush_lazy_mmu_mode() do {} while (0)
+
+#define flush_tlb_range(vma,start,end) \
+ do { (void)(start); flush_tlb_pending(); } while (0)
+#define flush_tlb_page(vma,addr) flush_tlb_pending()
+#define flush_tlb_mm(mm) flush_tlb_pending()
/* Local cpu only. */
extern void __flush_tlb_all(void);
-extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
+
extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
#ifndef CONFIG_SMP
@@ -54,24 +38,15 @@ do { flush_tsb_kernel_range(start,end); \
__flush_tlb_kernel_range(start,end); \
} while (0)
-static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
-{
- __flush_tlb_page(CTX_HWBITS(mm->context), vaddr);
-}
-
#else /* CONFIG_SMP */
extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
#define flush_tlb_kernel_range(start, end) \
do { flush_tsb_kernel_range(start,end); \
smp_flush_tlb_kernel_range(start, end); \
} while (0)
-#define global_flush_tlb_page(mm, vaddr) \
- smp_flush_tlb_page(mm, vaddr)
-
#endif /* ! CONFIG_SMP */
#endif /* _SPARC64_TLBFLUSH_H */
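
Note: with the hunk above, flush_tlb_mm/page/range all funnel into flush_tlb_pending(), which drains the per-cpu tlb_batch filled elsewhere by tlb_batch_add_one(). The following is a rough sketch of that deferred-flush idea, illustrative only; the example_ names are hypothetical and TLB_BATCH_NR is the array size defined in the real header.

struct example_tlb_batch {
	struct mm_struct *mm;
	unsigned long nr;
	unsigned long vaddrs[TLB_BATCH_NR];
};

static void example_queue_flush(struct example_tlb_batch *tb,
				struct mm_struct *mm, unsigned long vaddr)
{
	if (tb->mm != mm || tb->nr == TLB_BATCH_NR) {
		/* flush_tlb_pending() drains the batch in one cross call */
		tb->nr = 0;
		tb->mm = mm;
	}
	tb->vaddrs[tb->nr++] = vaddr;
}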
diff --git a/trunk/arch/sparc/include/uapi/asm/Kbuild b/trunk/arch/sparc/include/uapi/asm/Kbuild
index b5843ee09fb5..ce175aff71b7 100644
--- a/trunk/arch/sparc/include/uapi/asm/Kbuild
+++ b/trunk/arch/sparc/include/uapi/asm/Kbuild
@@ -44,6 +44,7 @@ header-y += swab.h
header-y += termbits.h
header-y += termios.h
header-y += traps.h
+header-y += types.h
header-y += uctx.h
header-y += unistd.h
header-y += utrap.h
diff --git a/trunk/arch/sparc/include/uapi/asm/types.h b/trunk/arch/sparc/include/uapi/asm/types.h
new file mode 100644
index 000000000000..383d156cde9c
--- /dev/null
+++ b/trunk/arch/sparc/include/uapi/asm/types.h
@@ -0,0 +1,17 @@
+#ifndef _SPARC_TYPES_H
+#define _SPARC_TYPES_H
+/*
+ * This file is never included by application software unless
+ * explicitly requested (e.g., via linux/types.h) in which case the
+ * application is Linux specific so (user-) name space pollution is
+ * not a major issue. However, for interoperability, libraries still
+ * need to be careful to avoid a name clashes.
+ */
+
+#if defined(__sparc__)
+
+#include <asm-generic/int-ll64.h>
+
+#endif /* defined(__sparc__) */
+
+#endif /* defined(_SPARC_TYPES_H) */
diff --git a/trunk/arch/sparc/kernel/smp_64.c b/trunk/arch/sparc/kernel/smp_64.c
index ca64d2a86ec0..537eb66abd06 100644
--- a/trunk/arch/sparc/kernel/smp_64.c
+++ b/trunk/arch/sparc/kernel/smp_64.c
@@ -849,7 +849,7 @@ void smp_tsb_sync(struct mm_struct *mm)
}
extern unsigned long xcall_flush_tlb_mm;
-extern unsigned long xcall_flush_tlb_page;
+extern unsigned long xcall_flush_tlb_pending;
extern unsigned long xcall_flush_tlb_kernel_range;
extern unsigned long xcall_fetch_glob_regs;
extern unsigned long xcall_fetch_glob_pmu;
@@ -1074,56 +1074,23 @@ void smp_flush_tlb_mm(struct mm_struct *mm)
put_cpu();
}
-struct tlb_pending_info {
- unsigned long ctx;
- unsigned long nr;
- unsigned long *vaddrs;
-};
-
-static void tlb_pending_func(void *info)
-{
- struct tlb_pending_info *t = info;
-
- __flush_tlb_pending(t->ctx, t->nr, t->vaddrs);
-}
-
void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs)
{
u32 ctx = CTX_HWBITS(mm->context);
- struct tlb_pending_info info;
int cpu = get_cpu();
- info.ctx = ctx;
- info.nr = nr;
- info.vaddrs = vaddrs;
-
if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
else
- smp_call_function_many(mm_cpumask(mm), tlb_pending_func,
- &info, 1);
+ smp_cross_call_masked(&xcall_flush_tlb_pending,
+ ctx, nr, (unsigned long) vaddrs,
+ mm_cpumask(mm));
__flush_tlb_pending(ctx, nr, vaddrs);
put_cpu();
}
-void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr)
-{
- unsigned long context = CTX_HWBITS(mm->context);
- int cpu = get_cpu();
-
- if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
- cpumask_copy(mm_cpumask(mm), cpumask_of(cpu));
- else
- smp_cross_call_masked(&xcall_flush_tlb_page,
- context, vaddr, 0,
- mm_cpumask(mm));
- __flush_tlb_page(context, vaddr);
-
- put_cpu();
-}
-
void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
start &= PAGE_MASK;
diff --git a/trunk/arch/sparc/lib/bitext.c b/trunk/arch/sparc/lib/bitext.c
index 8ec4e9c0251a..48d00e72ce15 100644
--- a/trunk/arch/sparc/lib/bitext.c
+++ b/trunk/arch/sparc/lib/bitext.c
@@ -119,7 +119,11 @@ void bit_map_clear(struct bit_map *t, int offset, int len)
void bit_map_init(struct bit_map *t, unsigned long *map, int size)
{
- bitmap_zero(map, size);
+
+ if ((size & 07) != 0)
+ BUG();
+ memset(map, 0, size>>3);
+
memset(t, 0, sizeof *t);
spin_lock_init(&t->lock);
t->map = map;
diff --git a/trunk/arch/sparc/mm/iommu.c b/trunk/arch/sparc/mm/iommu.c
index 28f96f27c768..0f4f7191fbba 100644
--- a/trunk/arch/sparc/mm/iommu.c
+++ b/trunk/arch/sparc/mm/iommu.c
@@ -34,7 +34,7 @@
#define IOMMU_RNGE IOMMU_RNGE_256MB
#define IOMMU_START 0xF0000000
#define IOMMU_WINSIZE (256*1024*1024U)
-#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 256KB */
+#define IOMMU_NPTES (IOMMU_WINSIZE/PAGE_SIZE) /* 64K PTEs, 265KB */
#define IOMMU_ORDER 6 /* 4096 * (1<<6) */
/* srmmu.c */
diff --git a/trunk/arch/sparc/mm/srmmu.c b/trunk/arch/sparc/mm/srmmu.c
index 036c2797dece..c38bb72e3e80 100644
--- a/trunk/arch/sparc/mm/srmmu.c
+++ b/trunk/arch/sparc/mm/srmmu.c
@@ -280,9 +280,7 @@ static void __init srmmu_nocache_init(void)
SRMMU_NOCACHE_ALIGN_MAX, 0UL);
memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
- srmmu_nocache_bitmap =
- __alloc_bootmem(BITS_TO_LONGS(bitmap_bits) * sizeof(long),
- SMP_CACHE_BYTES, 0UL);
+ srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
diff --git a/trunk/arch/sparc/mm/tlb.c b/trunk/arch/sparc/mm/tlb.c
index 272aa4f7657e..ba6ae7ffdc2c 100644
--- a/trunk/arch/sparc/mm/tlb.c
+++ b/trunk/arch/sparc/mm/tlb.c
@@ -24,17 +24,11 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);
void flush_tlb_pending(void)
{
struct tlb_batch *tb = &get_cpu_var(tlb_batch);
- struct mm_struct *mm = tb->mm;
- if (!tb->tlb_nr)
- goto out;
+ if (tb->tlb_nr) {
+ flush_tsb_user(tb);
- flush_tsb_user(tb);
-
- if (CTX_VALID(mm->context)) {
- if (tb->tlb_nr == 1) {
- global_flush_tlb_page(mm, tb->vaddrs[0]);
- } else {
+ if (CTX_VALID(tb->mm->context)) {
#ifdef CONFIG_SMP
smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
&tb->vaddrs[0]);
@@ -43,30 +37,12 @@ void flush_tlb_pending(void)
tb->tlb_nr, &tb->vaddrs[0]);
#endif
}
+ tb->tlb_nr = 0;
}
- tb->tlb_nr = 0;
-
-out:
put_cpu_var(tlb_batch);
}
-void arch_enter_lazy_mmu_mode(void)
-{
- struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
-
- tb->active = 1;
-}
-
-void arch_leave_lazy_mmu_mode(void)
-{
- struct tlb_batch *tb = &__get_cpu_var(tlb_batch);
-
- if (tb->tlb_nr)
- flush_tlb_pending();
- tb->active = 0;
-}
-
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
bool exec)
{
@@ -84,12 +60,6 @@ static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
nr = 0;
}
- if (!tb->active) {
- global_flush_tlb_page(mm, vaddr);
- flush_tsb_user_page(mm, vaddr);
- return;
- }
-
if (nr == 0)
tb->mm = mm;
diff --git a/trunk/arch/sparc/mm/tsb.c b/trunk/arch/sparc/mm/tsb.c
index 2cc3bce5ee91..428982b9becf 100644
--- a/trunk/arch/sparc/mm/tsb.c
+++ b/trunk/arch/sparc/mm/tsb.c
@@ -7,10 +7,11 @@
#include
#include
#include
-#include
+#include
+#include
#include
+#include
#include
-#include
#include
extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES];
@@ -45,56 +46,28 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end)
}
}
-static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v,
- unsigned long hash_shift,
- unsigned long nentries)
-{
- unsigned long tag, ent, hash;
-
- v &= ~0x1UL;
- hash = tsb_hash(v, hash_shift, nentries);
- ent = tsb + (hash * sizeof(struct tsb));
- tag = (v >> 22UL);
-
- tsb_flush(ent, tag);
-}
-
static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift,
unsigned long tsb, unsigned long nentries)
{
unsigned long i;
- for (i = 0; i < tb->tlb_nr; i++)
- __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries);
-}
-
-void flush_tsb_user(struct tlb_batch *tb)
-{
- struct mm_struct *mm = tb->mm;
- unsigned long nentries, base, flags;
+ for (i = 0; i < tb->tlb_nr; i++) {
+ unsigned long v = tb->vaddrs[i];
+ unsigned long tag, ent, hash;
- spin_lock_irqsave(&mm->context.lock, flags);
+ v &= ~0x1UL;
- base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb;
- nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
- if (tlb_type == cheetah_plus || tlb_type == hypervisor)
- base = __pa(base);
- __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
+ hash = tsb_hash(v, hash_shift, nentries);
+ ent = tsb + (hash * sizeof(struct tsb));
+ tag = (v >> 22UL);
-#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
- if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
- base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb;
- nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
- if (tlb_type == cheetah_plus || tlb_type == hypervisor)
- base = __pa(base);
- __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
+ tsb_flush(ent, tag);
}
-#endif
- spin_unlock_irqrestore(&mm->context.lock, flags);
}
-void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
+void flush_tsb_user(struct tlb_batch *tb)
{
+ struct mm_struct *mm = tb->mm;
unsigned long nentries, base, flags;
spin_lock_irqsave(&mm->context.lock, flags);
@@ -103,7 +76,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
- __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries);
+ __flush_tsb_one(tb, PAGE_SHIFT, base, nentries);
#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
if (mm->context.tsb_block[MM_TSB_HUGE].tsb) {
@@ -111,7 +84,7 @@ void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr)
nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries;
if (tlb_type == cheetah_plus || tlb_type == hypervisor)
base = __pa(base);
- __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries);
+ __flush_tsb_one(tb, HPAGE_SHIFT, base, nentries);
}
#endif
spin_unlock_irqrestore(&mm->context.lock, flags);
diff --git a/trunk/arch/sparc/mm/ultra.S b/trunk/arch/sparc/mm/ultra.S
index 432aa0cb1b38..f8e13d421fcb 100644
--- a/trunk/arch/sparc/mm/ultra.S
+++ b/trunk/arch/sparc/mm/ultra.S
@@ -52,33 +52,6 @@ __flush_tlb_mm: /* 18 insns */
nop
nop
- .align 32
- .globl __flush_tlb_page
-__flush_tlb_page: /* 22 insns */
- /* %o0 = context, %o1 = vaddr */
- rdpr %pstate, %g7
- andn %g7, PSTATE_IE, %g2
- wrpr %g2, %pstate
- mov SECONDARY_CONTEXT, %o4
- ldxa [%o4] ASI_DMMU, %g2
- stxa %o0, [%o4] ASI_DMMU
- andcc %o1, 1, %g0
- andn %o1, 1, %o3
- be,pn %icc, 1f
- or %o3, 0x10, %o3
- stxa %g0, [%o3] ASI_IMMU_DEMAP
-1: stxa %g0, [%o3] ASI_DMMU_DEMAP
- membar #Sync
- stxa %g2, [%o4] ASI_DMMU
- sethi %hi(KERNBASE), %o4
- flush %o4
- retl
- wrpr %g7, 0x0, %pstate
- nop
- nop
- nop
- nop
-
.align 32
.globl __flush_tlb_pending
__flush_tlb_pending: /* 26 insns */
@@ -230,31 +203,6 @@ __cheetah_flush_tlb_mm: /* 19 insns */
retl
wrpr %g7, 0x0, %pstate
-__cheetah_flush_tlb_page: /* 22 insns */
- /* %o0 = context, %o1 = vaddr */
- rdpr %pstate, %g7
- andn %g7, PSTATE_IE, %g2
- wrpr %g2, 0x0, %pstate
- wrpr %g0, 1, %tl
- mov PRIMARY_CONTEXT, %o4
- ldxa [%o4] ASI_DMMU, %g2
- srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3
- sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3
- or %o0, %o3, %o0 /* Preserve nucleus page size fields */
- stxa %o0, [%o4] ASI_DMMU
- andcc %o1, 1, %g0
- be,pn %icc, 1f
- andn %o1, 1, %o3
- stxa %g0, [%o3] ASI_IMMU_DEMAP
-1: stxa %g0, [%o3] ASI_DMMU_DEMAP
- membar #Sync
- stxa %g2, [%o4] ASI_DMMU
- sethi %hi(KERNBASE), %o4
- flush %o4
- wrpr %g0, 0, %tl
- retl
- wrpr %g7, 0x0, %pstate
-
__cheetah_flush_tlb_pending: /* 27 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
rdpr %pstate, %g7
@@ -321,20 +269,6 @@ __hypervisor_flush_tlb_mm: /* 10 insns */
retl
nop
-__hypervisor_flush_tlb_page: /* 11 insns */
- /* %o0 = context, %o1 = vaddr */
- mov %o0, %g2
- mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */
- mov %g2, %o1 /* ARG1: mmu context */
- mov HV_MMU_ALL, %o2 /* ARG2: flags */
- srlx %o0, PAGE_SHIFT, %o0
- sllx %o0, PAGE_SHIFT, %o0
- ta HV_MMU_UNMAP_ADDR_TRAP
- brnz,pn %o0, __hypervisor_tlb_tl0_error
- mov HV_MMU_UNMAP_ADDR_TRAP, %o1
- retl
- nop
-
__hypervisor_flush_tlb_pending: /* 16 insns */
/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
sllx %o1, 3, %g1
@@ -405,13 +339,6 @@ cheetah_patch_cachetlbops:
call tlb_patch_one
mov 19, %o2
- sethi %hi(__flush_tlb_page), %o0
- or %o0, %lo(__flush_tlb_page), %o0
- sethi %hi(__cheetah_flush_tlb_page), %o1
- or %o1, %lo(__cheetah_flush_tlb_page), %o1
- call tlb_patch_one
- mov 22, %o2
-
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__cheetah_flush_tlb_pending), %o1
@@ -470,9 +397,10 @@ xcall_flush_tlb_mm: /* 21 insns */
nop
nop
- .globl xcall_flush_tlb_page
-xcall_flush_tlb_page: /* 17 insns */
- /* %g5=context, %g1=vaddr */
+ .globl xcall_flush_tlb_pending
+xcall_flush_tlb_pending: /* 21 insns */
+ /* %g5=context, %g1=nr, %g7=vaddrs[] */
+ sllx %g1, 3, %g1
mov PRIMARY_CONTEXT, %g4
ldxa [%g4] ASI_DMMU, %g2
srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4
@@ -480,16 +408,20 @@ xcall_flush_tlb_page: /* 17 insns */
or %g5, %g4, %g5
mov PRIMARY_CONTEXT, %g4
stxa %g5, [%g4] ASI_DMMU
- andcc %g1, 0x1, %g0
+1: sub %g1, (1 << 3), %g1
+ ldx [%g7 + %g1], %g5
+ andcc %g5, 0x1, %g0
be,pn %icc, 2f
- andn %g1, 0x1, %g5
+
+ andn %g5, 0x1, %g5
stxa %g0, [%g5] ASI_IMMU_DEMAP
2: stxa %g0, [%g5] ASI_DMMU_DEMAP
membar #Sync
+ brnz,pt %g1, 1b
+ nop
stxa %g2, [%g4] ASI_DMMU
retry
nop
- nop
.globl xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range: /* 25 insns */
@@ -724,13 +656,15 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */
membar #Sync
retry
- .globl __hypervisor_xcall_flush_tlb_page
-__hypervisor_xcall_flush_tlb_page: /* 17 insns */
- /* %g5=ctx, %g1=vaddr */
+ .globl __hypervisor_xcall_flush_tlb_pending
+__hypervisor_xcall_flush_tlb_pending: /* 21 insns */
+ /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */
+ sllx %g1, 3, %g1
mov %o0, %g2
mov %o1, %g3
mov %o2, %g4
- mov %g1, %o0 /* ARG0: virtual address */
+1: sub %g1, (1 << 3), %g1
+ ldx [%g7 + %g1], %o0 /* ARG0: virtual address */
mov %g5, %o1 /* ARG1: mmu context */
mov HV_MMU_ALL, %o2 /* ARG2: flags */
srlx %o0, PAGE_SHIFT, %o0
@@ -739,6 +673,8 @@ __hypervisor_xcall_flush_tlb_page: /* 17 insns */
mov HV_MMU_UNMAP_ADDR_TRAP, %g6
brnz,a,pn %o0, __hypervisor_tlb_xcall_error
mov %o0, %g5
+ brnz,pt %g1, 1b
+ nop
mov %g2, %o0
mov %g3, %o1
mov %g4, %o2
@@ -821,13 +757,6 @@ hypervisor_patch_cachetlbops:
call tlb_patch_one
mov 10, %o2
- sethi %hi(__flush_tlb_page), %o0
- or %o0, %lo(__flush_tlb_page), %o0
- sethi %hi(__hypervisor_flush_tlb_page), %o1
- or %o1, %lo(__hypervisor_flush_tlb_page), %o1
- call tlb_patch_one
- mov 11, %o2
-
sethi %hi(__flush_tlb_pending), %o0
or %o0, %lo(__flush_tlb_pending), %o0
sethi %hi(__hypervisor_flush_tlb_pending), %o1
@@ -859,12 +788,12 @@ hypervisor_patch_cachetlbops:
call tlb_patch_one
mov 21, %o2
- sethi %hi(xcall_flush_tlb_page), %o0
- or %o0, %lo(xcall_flush_tlb_page), %o0
- sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1
- or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
+ sethi %hi(xcall_flush_tlb_pending), %o0
+ or %o0, %lo(xcall_flush_tlb_pending), %o0
+ sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1
+ or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1
call tlb_patch_one
- mov 17, %o2
+ mov 21, %o2
sethi %hi(xcall_flush_tlb_kernel_range), %o0
or %o0, %lo(xcall_flush_tlb_kernel_range), %o0
diff --git a/trunk/arch/tile/include/asm/irqflags.h b/trunk/arch/tile/include/asm/irqflags.h
index c96f9bbb760d..241c0bb60b12 100644
--- a/trunk/arch/tile/include/asm/irqflags.h
+++ b/trunk/arch/tile/include/asm/irqflags.h
@@ -40,15 +40,7 @@
#include
#include
-/*
- * Set and clear kernel interrupt masks.
- *
- * NOTE: __insn_mtspr() is a compiler builtin marked as a memory
- * clobber. We rely on it being equivalent to a compiler barrier in
- * this code since arch_local_irq_save() and friends must act as
- * compiler barriers. This compiler semantic is baked into enough
- * places that the compiler will maintain it going forward.
- */
+/* Set and clear kernel interrupt masks. */
#if CHIP_HAS_SPLIT_INTR_MASK()
#if INT_PERF_COUNT < 32 || INT_AUX_PERF_COUNT < 32 || INT_MEM_ERROR >= 32
# error Fix assumptions about which word various interrupts are in
diff --git a/trunk/arch/tile/kernel/setup.c b/trunk/arch/tile/kernel/setup.c
index 7a5aa1a7864e..d1e15f7b59c6 100644
--- a/trunk/arch/tile/kernel/setup.c
+++ b/trunk/arch/tile/kernel/setup.c
@@ -1004,8 +1004,15 @@ void __cpuinit setup_cpu(int boot)
#ifdef CONFIG_BLK_DEV_INITRD
+/*
+ * Note that the kernel can potentially support other compression
+ * techniques than gz, though we don't do so by default. If we ever
+ * decide to do so we can either look for other filename extensions,
+ * or just allow a file with this name to be compressed with an
+ * arbitrary compressor (somewhat counterintuitively).
+ */
static int __initdata set_initramfs_file;
-static char __initdata initramfs_file[128] = "initramfs";
+static char __initdata initramfs_file[128] = "initramfs.cpio.gz";
static int __init setup_initramfs_file(char *str)
{
@@ -1019,9 +1026,9 @@ static int __init setup_initramfs_file(char *str)
early_param("initramfs_file", setup_initramfs_file);
/*
- * We look for a file called "initramfs" in the hvfs. If there is one, we
- * allocate some memory for it and it will be unpacked to the initramfs.
- * If it's compressed, the initd code will uncompress it first.
+ * We look for an "initramfs.cpio.gz" file in the hvfs.
+ * If there is one, we allocate some memory for it and it will be
+ * unpacked to the initramfs.
*/
static void __init load_hv_initrd(void)
{
@@ -1031,16 +1038,10 @@ static void __init load_hv_initrd(void)
fd = hv_fs_findfile((HV_VirtAddr) initramfs_file);
if (fd == HV_ENOENT) {
- if (set_initramfs_file) {
+ if (set_initramfs_file)
pr_warning("No such hvfs initramfs file '%s'\n",
initramfs_file);
- return;
- } else {
- /* Try old backwards-compatible name. */
- fd = hv_fs_findfile((HV_VirtAddr)"initramfs.cpio.gz");
- if (fd == HV_ENOENT)
- return;
- }
+ return;
}
BUG_ON(fd < 0);
stat = hv_fs_fstat(fd);
diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig
index 15b5cef4aa38..70c0f3da0476 100644
--- a/trunk/arch/x86/Kconfig
+++ b/trunk/arch/x86/Kconfig
@@ -1549,7 +1549,6 @@ config X86_SMAP
config EFI
bool "EFI runtime service support"
depends on ACPI
- select UCS2_STRING
---help---
This enables the kernel to use EFI runtime services that are
available (such as the EFI variable services).
diff --git a/trunk/arch/x86/boot/compressed/Makefile b/trunk/arch/x86/boot/compressed/Makefile
index 5ef205c5f37b..8a84501acb1b 100644
--- a/trunk/arch/x86/boot/compressed/Makefile
+++ b/trunk/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
# create a compressed vmlinux image from the original vmlinux
#
-targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo
+targets := vmlinux.lds vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma vmlinux.bin.xz vmlinux.bin.lzo head_$(BITS).o misc.o string.o cmdline.o early_serial_console.o piggy.o
KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -29,6 +29,7 @@ VMLINUX_OBJS = $(obj)/vmlinux.lds $(obj)/head_$(BITS).o $(obj)/misc.o \
$(obj)/piggy.o
$(obj)/eboot.o: KBUILD_CFLAGS += -fshort-wchar -mno-red-zone
+$(obj)/efi_stub_$(BITS).o: KBUILD_CLFAGS += -fshort-wchar -mno-red-zone
ifeq ($(CONFIG_EFI_STUB), y)
VMLINUX_OBJS += $(obj)/eboot.o $(obj)/efi_stub_$(BITS).o
@@ -42,7 +43,7 @@ OBJCOPYFLAGS_vmlinux.bin := -R .comment -S
$(obj)/vmlinux.bin: vmlinux FORCE
$(call if_changed,objcopy)
-targets += $(patsubst $(obj)/%,%,$(VMLINUX_OBJS)) vmlinux.bin.all vmlinux.relocs
+targets += vmlinux.bin.all vmlinux.relocs
CMD_RELOCS = arch/x86/tools/relocs
quiet_cmd_relocs = RELOCS $@
diff --git a/trunk/arch/x86/boot/compressed/eboot.c b/trunk/arch/x86/boot/compressed/eboot.c
index 8615f7581820..c205035a6b96 100644
--- a/trunk/arch/x86/boot/compressed/eboot.c
+++ b/trunk/arch/x86/boot/compressed/eboot.c
@@ -251,51 +251,6 @@ static void find_bits(unsigned long mask, u8 *pos, u8 *size)
*size = len;
}
-static efi_status_t setup_efi_vars(struct boot_params *params)
-{
- struct setup_data *data;
- struct efi_var_bootdata *efidata;
- u64 store_size, remaining_size, var_size;
- efi_status_t status;
-
- if (!sys_table->runtime->query_variable_info)
- return EFI_UNSUPPORTED;
-
- data = (struct setup_data *)(unsigned long)params->hdr.setup_data;
-
- while (data && data->next)
- data = (struct setup_data *)(unsigned long)data->next;
-
- status = efi_call_phys4(sys_table->runtime->query_variable_info,
- EFI_VARIABLE_NON_VOLATILE |
- EFI_VARIABLE_BOOTSERVICE_ACCESS |
- EFI_VARIABLE_RUNTIME_ACCESS, &store_size,
- &remaining_size, &var_size);
-
- if (status != EFI_SUCCESS)
- return status;
-
- status = efi_call_phys3(sys_table->boottime->allocate_pool,
- EFI_LOADER_DATA, sizeof(*efidata), &efidata);
-
- if (status != EFI_SUCCESS)
- return status;
-
- efidata->data.type = SETUP_EFI_VARS;
- efidata->data.len = sizeof(struct efi_var_bootdata) -
- sizeof(struct setup_data);
- efidata->data.next = 0;
- efidata->store_size = store_size;
- efidata->remaining_size = remaining_size;
- efidata->max_var_size = var_size;
-
- if (data)
- data->next = (unsigned long)efidata;
- else
- params->hdr.setup_data = (unsigned long)efidata;
-
-}
-
static efi_status_t setup_efi_pci(struct boot_params *params)
{
efi_pci_io_protocol *pci;
@@ -1202,8 +1157,6 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table,
setup_graphics(boot_params);
- setup_efi_vars(boot_params);
-
setup_efi_pci(boot_params);
status = efi_call_phys3(sys_table->boottime->allocate_pool,
diff --git a/trunk/arch/x86/include/asm/efi.h b/trunk/arch/x86/include/asm/efi.h
index 2fb5d5884e23..60c89f30c727 100644
--- a/trunk/arch/x86/include/asm/efi.h
+++ b/trunk/arch/x86/include/asm/efi.h
@@ -102,13 +102,6 @@ extern void efi_call_phys_epilog(void);
extern void efi_unmap_memmap(void);
extern void efi_memory_uc(u64 addr, unsigned long size);
-struct efi_var_bootdata {
- struct setup_data data;
- u64 store_size;
- u64 remaining_size;
- u64 max_var_size;
-};
-
#ifdef CONFIG_EFI
static inline bool efi_is_native(void)
diff --git a/trunk/arch/x86/include/asm/paravirt.h b/trunk/arch/x86/include/asm/paravirt.h
index 7361e47db79f..5edd1742cfd0 100644
--- a/trunk/arch/x86/include/asm/paravirt.h
+++ b/trunk/arch/x86/include/asm/paravirt.h
@@ -703,10 +703,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
-static inline void arch_flush_lazy_mmu_mode(void)
-{
- PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush);
-}
+void arch_flush_lazy_mmu_mode(void);
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
phys_addr_t phys, pgprot_t flags)
diff --git a/trunk/arch/x86/include/asm/paravirt_types.h b/trunk/arch/x86/include/asm/paravirt_types.h
index b3b0ec1dac86..142236ed83af 100644
--- a/trunk/arch/x86/include/asm/paravirt_types.h
+++ b/trunk/arch/x86/include/asm/paravirt_types.h
@@ -91,7 +91,6 @@ struct pv_lazy_ops {
/* Set deferred update mode, used for batching operations. */
void (*enter)(void);
void (*leave)(void);
- void (*flush)(void);
};
struct pv_time_ops {
@@ -680,7 +679,6 @@ void paravirt_end_context_switch(struct task_struct *next);
void paravirt_enter_lazy_mmu(void);
void paravirt_leave_lazy_mmu(void);
-void paravirt_flush_lazy_mmu(void);
void _paravirt_nop(void);
u32 _paravirt_ident_32(u32);
diff --git a/trunk/arch/x86/include/asm/syscall.h b/trunk/arch/x86/include/asm/syscall.h
index 2e188d68397c..1ace47b62592 100644
--- a/trunk/arch/x86/include/asm/syscall.h
+++ b/trunk/arch/x86/include/asm/syscall.h
@@ -29,13 +29,13 @@ extern const unsigned long sys_call_table[];
*/
static inline int syscall_get_nr(struct task_struct *task, struct pt_regs *regs)
{
- return regs->orig_ax;
+ return regs->orig_ax & __SYSCALL_MASK;
}
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
- regs->ax = regs->orig_ax;
+ regs->ax = regs->orig_ax & __SYSCALL_MASK;
}
static inline long syscall_get_error(struct task_struct *task,
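
Note: the masking added above matters on x86_64 kernels built with the x32 ABI, where x32 entry points set a high bit in the syscall number that must be stripped before the value is used as a table index. A worked sketch follows; the EX_ names are hypothetical stand-ins, assuming the x32 bit is bit 30 as on CONFIG_X86_X32_ABI kernels.

#define EX_X32_SYSCALL_BIT	0x40000000UL	/* assumed __X32_SYSCALL_BIT */
#define EX_SYSCALL_MASK		(~EX_X32_SYSCALL_BIT)

static inline int example_syscall_nr(unsigned long orig_ax)
{
	/* e.g. 0x40000027 (x32 getpid) -> 0x27 after masking */
	return orig_ax & EX_SYSCALL_MASK;
}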
diff --git a/trunk/arch/x86/include/asm/tlb.h b/trunk/arch/x86/include/asm/tlb.h
index c7797307fc2b..4fef20773b8f 100644
--- a/trunk/arch/x86/include/asm/tlb.h
+++ b/trunk/arch/x86/include/asm/tlb.h
@@ -7,7 +7,7 @@
#define tlb_flush(tlb) \
{ \
- if (!tlb->fullmm && !tlb->need_flush_all) \
+ if (tlb->fullmm == 0) \
flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end, 0UL); \
else \
flush_tlb_mm_range(tlb->mm, 0UL, TLB_FLUSH_ALL, 0UL); \
diff --git a/trunk/arch/x86/include/uapi/asm/bootparam.h b/trunk/arch/x86/include/uapi/asm/bootparam.h
index 08744242b8d2..c15ddaf90710 100644
--- a/trunk/arch/x86/include/uapi/asm/bootparam.h
+++ b/trunk/arch/x86/include/uapi/asm/bootparam.h
@@ -6,7 +6,6 @@
#define SETUP_E820_EXT 1
#define SETUP_DTB 2
#define SETUP_PCI 3
-#define SETUP_EFI_VARS 4
/* ram_size flags */
#define RAMDISK_IMAGE_START_MASK 0x07FF
diff --git a/trunk/arch/x86/kernel/cpu/mshyperv.c b/trunk/arch/x86/kernel/cpu/mshyperv.c
index 8f4be53ea04b..a7d26d83fb70 100644
--- a/trunk/arch/x86/kernel/cpu/mshyperv.c
+++ b/trunk/arch/x86/kernel/cpu/mshyperv.c
@@ -35,6 +35,13 @@ static bool __init ms_hyperv_platform(void)
if (!boot_cpu_has(X86_FEATURE_HYPERVISOR))
return false;
+ /*
+ * Xen emulates Hyper-V to support enlightened Windows.
+ * Check to see first if we are on a Xen Hypervisor.
+ */
+ if (xen_cpuid_base())
+ return false;
+
cpuid(HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS,
&eax, &hyp_signature[0], &hyp_signature[1], &hyp_signature[2]);
@@ -75,6 +82,12 @@ static void __init ms_hyperv_init_platform(void)
if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE)
clocksource_register_hz(&hyperv_cs, NSEC_PER_SEC/100);
+#if IS_ENABLED(CONFIG_HYPERV)
+ /*
+ * Setup the IDT for hypervisor callback.
+ */
+ alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
+#endif
}
const __refconst struct hypervisor_x86 x86_hyper_ms_hyperv = {
@@ -90,11 +103,6 @@ static irq_handler_t vmbus_isr;
void hv_register_vmbus_handler(int irq, irq_handler_t handler)
{
- /*
- * Setup the IDT for hypervisor callback.
- */
- alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, hyperv_callback_vector);
-
vmbus_irq = irq;
vmbus_isr = handler;
}
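
Note: the xen_cpuid_base() test added above exists because Xen can emulate the Hyper-V interface for enlightened Windows guests, so Hyper-V detection must first rule out Xen. Below is a simplified sketch of the signature probe such a check performs; it is illustrative only, and the real xen_cpuid_base() scans a range of leaves rather than only 0x40000000.

static bool example_running_on_xen(void)
{
	unsigned int eax = 0x40000000, ebx, ecx, edx;
	char sig[13];

	native_cpuid(&eax, &ebx, &ecx, &edx);
	memcpy(sig + 0, &ebx, 4);
	memcpy(sig + 4, &ecx, 4);
	memcpy(sig + 8, &edx, 4);
	sig[12] = '\0';

	return strcmp(sig, "XenVMMXenVMM") == 0;
}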
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel.c b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
index cc45deb791b0..dab7580c47ae 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel.c
@@ -153,14 +153,8 @@ static struct event_constraint intel_gen_event_constraints[] __read_mostly =
};
static struct extra_reg intel_snb_extra_regs[] __read_mostly = {
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3f807f8fffull, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3f807f8fffull, RSP_1),
- EVENT_EXTRA_END
-};
-
-static struct extra_reg intel_snbep_extra_regs[] __read_mostly = {
- INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffff8fffull, RSP_0),
- INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffff8fffull, RSP_1),
+ INTEL_EVENT_EXTRA_REG(0xb7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
+ INTEL_EVENT_EXTRA_REG(0xbb, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
EVENT_EXTRA_END
};
@@ -2103,10 +2097,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_snb_event_constraints;
x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- if (boot_cpu_data.x86_model == 45)
- x86_pmu.extra_regs = intel_snbep_extra_regs;
- else
- x86_pmu.extra_regs = intel_snb_extra_regs;
+ x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
@@ -2132,10 +2123,7 @@ __init int intel_pmu_init(void)
x86_pmu.event_constraints = intel_ivb_event_constraints;
x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
- if (boot_cpu_data.x86_model == 62)
- x86_pmu.extra_regs = intel_snbep_extra_regs;
- else
- x86_pmu.extra_regs = intel_snb_extra_regs;
+ x86_pmu.extra_regs = intel_snb_extra_regs;
/* all extra regs are per-cpu when HT is on */
x86_pmu.er_flags |= ERF_HAS_RSP_1;
x86_pmu.er_flags |= ERF_NO_HT_SHARING;
diff --git a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 26830f3af0df..b05a575d56f4 100644
--- a/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/trunk/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -314,11 +314,10 @@ int intel_pmu_drain_bts_buffer(void)
if (top <= at)
return 0;
- memset(&regs, 0, sizeof(regs));
-
ds->bts_index = ds->bts_buffer_base;
perf_sample_data_init(&data, 0, event->hw.last_period);
+ regs.ip = 0;
/*
* Prepare a generic sample, i.e. fill in the invariant fields.
diff --git a/trunk/arch/x86/kernel/microcode_core_early.c b/trunk/arch/x86/kernel/microcode_core_early.c
index 833d51d6ee06..577db8417d15 100644
--- a/trunk/arch/x86/kernel/microcode_core_early.c
+++ b/trunk/arch/x86/kernel/microcode_core_early.c
@@ -45,6 +45,9 @@ static int __cpuinit x86_vendor(void)
u32 eax = 0x00000000;
u32 ebx, ecx = 0, edx;
+ if (!have_cpuid_p())
+ return X86_VENDOR_UNKNOWN;
+
native_cpuid(&eax, &ebx, &ecx, &edx);
if (CPUID_IS(CPUID_INTEL1, CPUID_INTEL2, CPUID_INTEL3, ebx, ecx, edx))
@@ -56,45 +59,18 @@ static int __cpuinit x86_vendor(void)
return X86_VENDOR_UNKNOWN;
}
-static int __cpuinit x86_family(void)
-{
- u32 eax = 0x00000001;
- u32 ebx, ecx = 0, edx;
- int x86;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-
- x86 = (eax >> 8) & 0xf;
- if (x86 == 15)
- x86 += (eax >> 20) & 0xff;
-
- return x86;
-}
-
void __init load_ucode_bsp(void)
{
- int vendor, x86;
-
- if (!have_cpuid_p())
- return;
+ int vendor = x86_vendor();
- vendor = x86_vendor();
- x86 = x86_family();
-
- if (vendor == X86_VENDOR_INTEL && x86 >= 6)
+ if (vendor == X86_VENDOR_INTEL)
load_ucode_intel_bsp();
}
void __cpuinit load_ucode_ap(void)
{
- int vendor, x86;
-
- if (!have_cpuid_p())
- return;
-
- vendor = x86_vendor();
- x86 = x86_family();
+ int vendor = x86_vendor();
- if (vendor == X86_VENDOR_INTEL && x86 >= 6)
+ if (vendor == X86_VENDOR_INTEL)
load_ucode_intel_ap();
}
diff --git a/trunk/arch/x86/kernel/paravirt.c b/trunk/arch/x86/kernel/paravirt.c
index 8bfb335f74bb..17fff18a1031 100644
--- a/trunk/arch/x86/kernel/paravirt.c
+++ b/trunk/arch/x86/kernel/paravirt.c
@@ -263,18 +263,6 @@ void paravirt_leave_lazy_mmu(void)
leave_lazy(PARAVIRT_LAZY_MMU);
}
-void paravirt_flush_lazy_mmu(void)
-{
- preempt_disable();
-
- if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
- arch_leave_lazy_mmu_mode();
- arch_enter_lazy_mmu_mode();
- }
-
- preempt_enable();
-}
-
void paravirt_start_context_switch(struct task_struct *prev)
{
BUG_ON(preemptible());
@@ -304,6 +292,18 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return this_cpu_read(paravirt_lazy_mode);
}
+void arch_flush_lazy_mmu_mode(void)
+{
+ preempt_disable();
+
+ if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
+ arch_leave_lazy_mmu_mode();
+ arch_enter_lazy_mmu_mode();
+ }
+
+ preempt_enable();
+}
+
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,
@@ -475,7 +475,6 @@ struct pv_mmu_ops pv_mmu_ops = {
.lazy_mode = {
.enter = paravirt_nop,
.leave = paravirt_nop,
- .flush = paravirt_nop,
},
.set_fixmap = native_set_fixmap,
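
Note: arch_flush_lazy_mmu_mode(), now defined out of line above, forces any queued page-table updates out to the hypervisor by leaving and immediately re-entering lazy MMU mode. A usage sketch of when a caller would want that, illustrative only with a hypothetical example_ helper:

static void example_update_and_touch(pte_t *ptep, pte_t val, void *vaddr)
{
	arch_enter_lazy_mmu_mode();
	set_pte(ptep, val);		/* may be batched under a hypervisor */
	arch_flush_lazy_mmu_mode();	/* push the queued update out now */
	(void)*(volatile char *)vaddr;	/* safe to touch the new mapping */
	arch_leave_lazy_mmu_mode();
}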
diff --git a/trunk/arch/x86/kernel/setup.c b/trunk/arch/x86/kernel/setup.c
index fae9134a2de9..90d8cc930f5e 100644
--- a/trunk/arch/x86/kernel/setup.c
+++ b/trunk/arch/x86/kernel/setup.c
@@ -507,14 +507,11 @@ static void __init memblock_x86_reserve_range_setup_data(void)
/*
* Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
- * On 64bit, old kexec-tools need to under 896MiB.
*/
#ifdef CONFIG_X86_32
-# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20)
-# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20)
+# define CRASH_KERNEL_ADDR_MAX (512 << 20)
#else
-# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20)
-# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM
+# define CRASH_KERNEL_ADDR_MAX MAXMEM
#endif
static void __init reserve_crashkernel_low(void)
@@ -524,35 +521,19 @@ static void __init reserve_crashkernel_low(void)
unsigned long long low_base = 0, low_size = 0;
unsigned long total_low_mem;
unsigned long long base;
- bool auto_set = false;
int ret;
total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
- /* crashkernel=Y,low */
ret = parse_crashkernel_low(boot_command_line, total_low_mem,
&low_size, &base);
- if (ret != 0) {
- /*
- * two parts from lib/swiotlb.c:
- * swiotlb size: user specified with swiotlb= or default.
- * swiotlb overflow buffer: now is hardcoded to 32k.
- * We round it to 8M for other buffers that
- * may need to stay low too.
- */
- low_size = swiotlb_size_or_default() + (8UL<<20);
- auto_set = true;
- } else {
- /* passed with crashkernel=0,low ? */
- if (!low_size)
- return;
- }
+ if (ret != 0 || low_size <= 0)
+ return;
low_base = memblock_find_in_range(low_size, (1ULL<<32),
low_size, alignment);
if (!low_base) {
- if (!auto_set)
- pr_info("crashkernel low reservation failed - No suitable area found.\n");
+ pr_info("crashkernel low reservation failed - No suitable area found.\n");
return;
}
@@ -573,22 +554,14 @@ static void __init reserve_crashkernel(void)
const unsigned long long alignment = 16<<20; /* 16M */
unsigned long long total_mem;
unsigned long long crash_size, crash_base;
- bool high = false;
int ret;
total_mem = memblock_phys_mem_size();
- /* crashkernel=XM */
ret = parse_crashkernel(boot_command_line, total_mem,
&crash_size, &crash_base);
- if (ret != 0 || crash_size <= 0) {
- /* crashkernel=X,high */
- ret = parse_crashkernel_high(boot_command_line, total_mem,
- &crash_size, &crash_base);
- if (ret != 0 || crash_size <= 0)
- return;
- high = true;
- }
+ if (ret != 0 || crash_size <= 0)
+ return;
/* 0 means: find the address automatically */
if (crash_base <= 0) {
@@ -596,9 +569,7 @@ static void __init reserve_crashkernel(void)
* kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
*/
crash_base = memblock_find_in_range(alignment,
- high ? CRASH_KERNEL_ADDR_HIGH_MAX :
- CRASH_KERNEL_ADDR_LOW_MAX,
- crash_size, alignment);
+ CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
if (!crash_base) {
pr_info("crashkernel reservation failed - No suitable area found.\n");
diff --git a/trunk/arch/x86/kvm/lapic.c b/trunk/arch/x86/kvm/lapic.c
index f77df1c5de6e..02b51dd4e4ad 100644
--- a/trunk/arch/x86/kvm/lapic.c
+++ b/trunk/arch/x86/kvm/lapic.c
@@ -1857,7 +1857,7 @@ int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data)
if (!pv_eoi_enabled(vcpu))
return 0;
return kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.pv_eoi.data,
- addr, sizeof(u8));
+ addr);
}
void kvm_lapic_init(void)
diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c
index e1721324c271..f19ac0aca60d 100644
--- a/trunk/arch/x86/kvm/x86.c
+++ b/trunk/arch/x86/kvm/x86.c
@@ -1823,8 +1823,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
return 0;
}
- if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa,
- sizeof(u32)))
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa))
return 1;
vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS);
@@ -1953,9 +1952,12 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
gpa_offset = data & ~(PAGE_MASK | 1);
+ /* Check that the address is 32-byte aligned. */
+ if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+ break;
+
if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
- &vcpu->arch.pv_time, data & ~1ULL,
- sizeof(struct pvclock_vcpu_time_info)))
+ &vcpu->arch.pv_time, data & ~1ULL))
vcpu->arch.pv_time_enabled = false;
else
vcpu->arch.pv_time_enabled = true;
@@ -1975,8 +1977,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 1;
if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.st.stime,
- data & KVM_STEAL_VALID_BITS,
- sizeof(struct kvm_steal_time)))
+ data & KVM_STEAL_VALID_BITS))
return 1;
vcpu->arch.st.msr_val = data;
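
Note: the check added above rejects a kvmclock address whose offset is not a multiple of sizeof(struct pvclock_vcpu_time_info), which is 32 bytes; the mask trick only works because that size is a power of two. A short sketch of the idiom, with a hypothetical helper:

static inline bool example_aligned(unsigned long offset, unsigned long size)
{
	/* valid only when size is a power of two, e.g. 32 */
	return (offset & (size - 1)) == 0;
}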
diff --git a/trunk/arch/x86/lguest/boot.c b/trunk/arch/x86/lguest/boot.c
index 7114c63f047d..1cbd89ca5569 100644
--- a/trunk/arch/x86/lguest/boot.c
+++ b/trunk/arch/x86/lguest/boot.c
@@ -1334,7 +1334,6 @@ __init void lguest_init(void)
pv_mmu_ops.read_cr3 = lguest_read_cr3;
pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu;
pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode;
- pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu;
pv_mmu_ops.pte_update = lguest_pte_update;
pv_mmu_ops.pte_update_defer = lguest_pte_update;
diff --git a/trunk/arch/x86/mm/fault.c b/trunk/arch/x86/mm/fault.c
index 0e883364abb5..2b97525246d4 100644
--- a/trunk/arch/x86/mm/fault.c
+++ b/trunk/arch/x86/mm/fault.c
@@ -378,12 +378,10 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
if (pgd_none(*pgd_ref))
return -1;
- if (pgd_none(*pgd)) {
+ if (pgd_none(*pgd))
set_pgd(pgd, *pgd_ref);
- arch_flush_lazy_mmu_mode();
- } else {
+ else
BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
- }
/*
* Below here mismatches are bugs because these lower tables
diff --git a/trunk/arch/x86/mm/pageattr-test.c b/trunk/arch/x86/mm/pageattr-test.c
index 0e38951e65eb..b0086567271c 100644
--- a/trunk/arch/x86/mm/pageattr-test.c
+++ b/trunk/arch/x86/mm/pageattr-test.c
@@ -68,7 +68,7 @@ static int print_split(struct split_state *s)
s->gpg++;
i += GPS/PAGE_SIZE;
} else if (level == PG_LEVEL_2M) {
- if ((pte_val(*pte) & _PAGE_PRESENT) && !(pte_val(*pte) & _PAGE_PSE)) {
+ if (!(pte_val(*pte) & _PAGE_PSE)) {
printk(KERN_ERR
"%lx level %d but not PSE %Lx\n",
addr, level, (u64)pte_val(*pte));
diff --git a/trunk/arch/x86/mm/pageattr.c b/trunk/arch/x86/mm/pageattr.c
index fb4e73ec24d8..091934e1d0d9 100644
--- a/trunk/arch/x86/mm/pageattr.c
+++ b/trunk/arch/x86/mm/pageattr.c
@@ -467,7 +467,7 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* We are safe now. Check whether the new pgprot is the same:
*/
old_pte = *kpte;
- old_prot = req_prot = pte_pgprot(old_pte);
+ old_prot = new_prot = req_prot = pte_pgprot(old_pte);
pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);
@@ -478,12 +478,12 @@ try_preserve_large_page(pte_t *kpte, unsigned long address,
* a non present pmd. The canon_pgprot will clear _PAGE_GLOBAL
* for the ancient hardware that doesn't support it.
*/
- if (pgprot_val(req_prot) & _PAGE_PRESENT)
- pgprot_val(req_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
+ if (pgprot_val(new_prot) & _PAGE_PRESENT)
+ pgprot_val(new_prot) |= _PAGE_PSE | _PAGE_GLOBAL;
else
- pgprot_val(req_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
+ pgprot_val(new_prot) &= ~(_PAGE_PSE | _PAGE_GLOBAL);
- req_prot = canon_pgprot(req_prot);
+ new_prot = canon_pgprot(new_prot);
/*
* old_pte points to the large page base address. So we need
@@ -1413,8 +1413,6 @@ void kernel_map_pages(struct page *page, int numpages, int enable)
* but that can deadlock->flush only current cpu:
*/
__flush_tlb_all();
-
- arch_flush_lazy_mmu_mode();
}
#ifdef CONFIG_HIBERNATION
diff --git a/trunk/arch/x86/mm/pgtable.c b/trunk/arch/x86/mm/pgtable.c
index 17fda6a8b3c2..193350b51f90 100644
--- a/trunk/arch/x86/mm/pgtable.c
+++ b/trunk/arch/x86/mm/pgtable.c
@@ -58,13 +58,6 @@ void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
- /*
- * NOTE! For PAE, any changes to the top page-directory-pointer-table
- * entries need a full cr3 reload to flush.
- */
-#ifdef CONFIG_X86_PAE
- tlb->need_flush_all = 1;
-#endif
tlb_remove_page(tlb, virt_to_page(pmd));
}
diff --git a/trunk/arch/x86/platform/efi/efi.c b/trunk/arch/x86/platform/efi/efi.c
index e4a86a677ce1..5f2ecaf3f9d8 100644
--- a/trunk/arch/x86/platform/efi/efi.c
+++ b/trunk/arch/x86/platform/efi/efi.c
@@ -41,7 +41,6 @@
#include
#include
#include
-#include <linux/ucs2_string.h>
#include
#include
@@ -52,13 +51,6 @@
#define EFI_DEBUG 1
-/*
- * There's some additional metadata associated with each
- * variable. Intel's reference implementation is 60 bytes - bump that
- * to account for potential alignment constraints
- */
-#define VAR_METADATA_SIZE 64
-
struct efi __read_mostly efi = {
.mps = EFI_INVALID_TABLE_ADDR,
.acpi = EFI_INVALID_TABLE_ADDR,
@@ -77,13 +69,6 @@ struct efi_memory_map memmap;
static struct efi efi_phys __initdata;
static efi_system_table_t efi_systab __initdata;
-static u64 efi_var_store_size;
-static u64 efi_var_remaining_size;
-static u64 efi_var_max_var_size;
-static u64 boot_used_size;
-static u64 boot_var_size;
-static u64 active_size;
-
unsigned long x86_efi_facility;
/*
@@ -113,15 +98,6 @@ static int __init setup_add_efi_memmap(char *arg)
}
early_param("add_efi_memmap", setup_add_efi_memmap);
-static bool efi_no_storage_paranoia;
-
-static int __init setup_storage_paranoia(char *arg)
-{
- efi_no_storage_paranoia = true;
- return 0;
-}
-early_param("efi_no_storage_paranoia", setup_storage_paranoia);
-
static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
@@ -186,53 +162,8 @@ static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
- efi_status_t status;
- static bool finished = false;
- static u64 var_size;
-
- status = efi_call_virt3(get_next_variable,
- name_size, name, vendor);
-
- if (status == EFI_NOT_FOUND) {
- finished = true;
- if (var_size < boot_used_size) {
- boot_var_size = boot_used_size - var_size;
- active_size += boot_var_size;
- } else {
- printk(KERN_WARNING FW_BUG "efi: Inconsistent initial sizes\n");
- }
- }
-
- if (boot_used_size && !finished) {
- unsigned long size;
- u32 attr;
- efi_status_t s;
- void *tmp;
-
- s = virt_efi_get_variable(name, vendor, &attr, &size, NULL);
-
- if (s != EFI_BUFFER_TOO_SMALL || !size)
- return status;
-
- tmp = kmalloc(size, GFP_ATOMIC);
-
- if (!tmp)
- return status;
-
- s = virt_efi_get_variable(name, vendor, &attr, &size, tmp);
-
- if (s == EFI_SUCCESS && (attr & EFI_VARIABLE_NON_VOLATILE)) {
- var_size += size;
- var_size += ucs2_strsize(name, 1024);
- active_size += size;
- active_size += VAR_METADATA_SIZE;
- active_size += ucs2_strsize(name, 1024);
- }
-
- kfree(tmp);
- }
-
- return status;
+ return efi_call_virt3(get_next_variable,
+ name_size, name, vendor);
}
static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -241,34 +172,9 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
unsigned long data_size,
void *data)
{
- efi_status_t status;
- u32 orig_attr = 0;
- unsigned long orig_size = 0;
-
- status = virt_efi_get_variable(name, vendor, &orig_attr, &orig_size,
- NULL);
-
- if (status != EFI_BUFFER_TOO_SMALL)
- orig_size = 0;
-
- status = efi_call_virt5(set_variable,
- name, vendor, attr,
- data_size, data);
-
- if (status == EFI_SUCCESS) {
- if (orig_size) {
- active_size -= orig_size;
- active_size -= ucs2_strsize(name, 1024);
- active_size -= VAR_METADATA_SIZE;
- }
- if (data_size) {
- active_size += data_size;
- active_size += ucs2_strsize(name, 1024);
- active_size += VAR_METADATA_SIZE;
- }
- }
-
- return status;
+ return efi_call_virt5(set_variable,
+ name, vendor, attr,
+ data_size, data);
}
static efi_status_t virt_efi_query_variable_info(u32 attr,
@@ -776,9 +682,6 @@ void __init efi_init(void)
char vendor[100] = "unknown";
int i = 0;
void *tmp;
- struct setup_data *data;
- struct efi_var_bootdata *efi_var_data;
- u64 pa_data;
#ifdef CONFIG_X86_32
if (boot_params.efi_info.efi_systab_hi ||
@@ -796,22 +699,6 @@ void __init efi_init(void)
if (efi_systab_init(efi_phys.systab))
return;
- pa_data = boot_params.hdr.setup_data;
- while (pa_data) {
- data = early_ioremap(pa_data, sizeof(*efi_var_data));
- if (data->type == SETUP_EFI_VARS) {
- efi_var_data = (struct efi_var_bootdata *)data;
-
- efi_var_store_size = efi_var_data->store_size;
- efi_var_remaining_size = efi_var_data->remaining_size;
- efi_var_max_var_size = efi_var_data->max_var_size;
- }
- pa_data = data->next;
- early_iounmap(data, sizeof(*efi_var_data));
- }
-
- boot_used_size = efi_var_store_size - efi_var_remaining_size;
-
set_bit(EFI_SYSTEM_TABLES, &x86_efi_facility);
/*
@@ -1112,48 +999,3 @@ u64 efi_mem_attributes(unsigned long phys_addr)
}
return 0;
}
-
-/*
- * Some firmware has serious problems when using more than 50% of the EFI
- * variable store, i.e. it triggers bugs that can brick machines. Ensure that
- * we never use more than this safe limit.
- *
- * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
- * store.
- */
-efi_status_t efi_query_variable_store(u32 attributes, unsigned long size)
-{
- efi_status_t status;
- u64 storage_size, remaining_size, max_size;
-
- status = efi.query_variable_info(attributes, &storage_size,
- &remaining_size, &max_size);
- if (status != EFI_SUCCESS)
- return status;
-
- if (!max_size && remaining_size > size)
- printk_once(KERN_ERR FW_BUG "Broken EFI implementation"
- " is returning MaxVariableSize=0\n");
- /*
- * Some firmware implementations refuse to boot if there's insufficient
- * space in the variable store. We account for that by refusing the
- * write if permitting it would reduce the available space to under
- * 50%. However, some firmware won't reclaim variable space until
- * after the used (not merely the actively used) space drops below
- * a threshold. We can approximate that case with the value calculated
- * above. If both the firmware and our calculations indicate that the
- * available space would drop below 50%, refuse the write.
- */
-
- if (!storage_size || size > remaining_size ||
- (max_size && size > max_size))
- return EFI_OUT_OF_RESOURCES;
-
- if (!efi_no_storage_paranoia &&
- ((active_size + size + VAR_METADATA_SIZE > storage_size / 2) &&
- (remaining_size - size < storage_size / 2)))
- return EFI_OUT_OF_RESOURCES;
-
- return EFI_SUCCESS;
-}
-EXPORT_SYMBOL_GPL(efi_query_variable_store);
diff --git a/trunk/arch/x86/xen/mmu.c b/trunk/arch/x86/xen/mmu.c
index e006c18d288a..6afbb2ca9a0a 100644
--- a/trunk/arch/x86/xen/mmu.c
+++ b/trunk/arch/x86/xen/mmu.c
@@ -1748,18 +1748,14 @@ static void *m2v(phys_addr_t maddr)
}
/* Set the page permissions on an identity-mapped pages */
-static void set_page_prot_flags(void *addr, pgprot_t prot, unsigned long flags)
+static void set_page_prot(void *addr, pgprot_t prot)
{
unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
pte_t pte = pfn_pte(pfn, prot);
- if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
+ if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0))
BUG();
}
-static void set_page_prot(void *addr, pgprot_t prot)
-{
- return set_page_prot_flags(addr, prot, UVMF_NONE);
-}
#ifdef CONFIG_X86_32
static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn)
{
@@ -1843,12 +1839,12 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end,
unsigned long addr)
{
if (*pt_base == PFN_DOWN(__pa(addr))) {
- set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
+ set_page_prot((void *)addr, PAGE_KERNEL);
clear_page((void *)addr);
(*pt_base)++;
}
if (*pt_end == PFN_DOWN(__pa(addr))) {
- set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG);
+ set_page_prot((void *)addr, PAGE_KERNEL);
clear_page((void *)addr);
(*pt_end)--;
}
@@ -2200,7 +2196,6 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
.lazy_mode = {
.enter = paravirt_enter_lazy_mmu,
.leave = xen_leave_lazy_mmu,
- .flush = paravirt_flush_lazy_mmu,
},
.set_fixmap = xen_set_fixmap,
diff --git a/trunk/block/blk-core.c b/trunk/block/blk-core.c
index 7c288358a745..074b758efc42 100644
--- a/trunk/block/blk-core.c
+++ b/trunk/block/blk-core.c
@@ -39,7 +39,6 @@
EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
-EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
DEFINE_IDA(blk_queue_ida);
diff --git a/trunk/block/blk-sysfs.c b/trunk/block/blk-sysfs.c
index 5efc5a647183..6206a934eb8c 100644
--- a/trunk/block/blk-sysfs.c
+++ b/trunk/block/blk-sysfs.c
@@ -229,8 +229,6 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \
unsigned long val; \
ssize_t ret; \
ret = queue_var_store(&val, page, count); \
- if (ret < 0) \
- return ret; \
if (neg) \
val = !val; \
\
diff --git a/trunk/block/partition-generic.c b/trunk/block/partition-generic.c
index 789cdea05893..ae95ee6a58aa 100644
--- a/trunk/block/partition-generic.c
+++ b/trunk/block/partition-generic.c
@@ -257,6 +257,7 @@ void delete_partition(struct gendisk *disk, int partno)
hd_struct_put(part);
}
+EXPORT_SYMBOL(delete_partition);
static ssize_t whole_disk_show(struct device *dev,
struct device_attribute *attr, char *buf)
diff --git a/trunk/crypto/gcm.c b/trunk/crypto/gcm.c
index 13ccbda34ff9..137ad1ec5438 100644
--- a/trunk/crypto/gcm.c
+++ b/trunk/crypto/gcm.c
@@ -44,7 +44,6 @@ struct crypto_rfc4543_ctx {
struct crypto_rfc4543_req_ctx {
u8 auth_tag[16];
- u8 assocbuf[32];
struct scatterlist cipher[1];
struct scatterlist payload[2];
struct scatterlist assoc[2];
@@ -1134,19 +1133,9 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);
- if (req->assoc->length == req->assoclen) {
- sg_init_table(assoc, 2);
- sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
- req->assoc->offset);
- } else {
- BUG_ON(req->assoclen > sizeof(rctx->assocbuf));
-
- scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
- req->assoclen, 0);
-
- sg_init_table(assoc, 2);
- sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
- }
+ sg_init_table(assoc, 2);
+ sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
+ req->assoc->offset);
scatterwalk_crypto_chain(assoc, payload, 0, 2);
aead_request_set_tfm(subreq, ctx->child);
diff --git a/trunk/drivers/acpi/Kconfig b/trunk/drivers/acpi/Kconfig
index 4bf68c8d4797..92ed9692c47e 100644
--- a/trunk/drivers/acpi/Kconfig
+++ b/trunk/drivers/acpi/Kconfig
@@ -396,7 +396,7 @@ config ACPI_CUSTOM_METHOD
config ACPI_BGRT
bool "Boottime Graphics Resource Table support"
- depends on EFI && X86
+ depends on EFI
help
This driver adds support for exposing the ACPI Boottime Graphics
Resource Table, which allows the operating system to obtain
diff --git a/trunk/drivers/acpi/acpi_i2c.c b/trunk/drivers/acpi/acpi_i2c.c
index a82c7626aa9b..82045e3f5cac 100644
--- a/trunk/drivers/acpi/acpi_i2c.c
+++ b/trunk/drivers/acpi/acpi_i2c.c
@@ -90,7 +90,7 @@ void acpi_i2c_register_devices(struct i2c_adapter *adapter)
acpi_handle handle;
acpi_status status;
- handle = ACPI_HANDLE(adapter->dev.parent);
+ handle = ACPI_HANDLE(&adapter->dev);
if (!handle)
return;
diff --git a/trunk/drivers/acpi/pci_root.c b/trunk/drivers/acpi/pci_root.c
index 6ae5e440436e..5ff173066127 100644
--- a/trunk/drivers/acpi/pci_root.c
+++ b/trunk/drivers/acpi/pci_root.c
@@ -415,6 +415,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root;
struct acpi_pci_driver *driver;
u32 flags, base_flags;
+ bool is_osc_granted = false;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root)
@@ -475,30 +476,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
flags = base_flags = OSC_PCI_SEGMENT_GROUPS_SUPPORT;
acpi_pci_osc_support(root, flags);
- /*
- * TBD: Need PCI interface for enumeration/configuration of roots.
- */
-
- mutex_lock(&acpi_pci_root_lock);
- list_add_tail(&root->node, &acpi_pci_roots);
- mutex_unlock(&acpi_pci_root_lock);
-
- /*
- * Scan the Root Bridge
- * --------------------
- * Must do this prior to any attempt to bind the root device, as the
- * PCI namespace does not get created until this call is made (and
- * thus the root bridge's pci_dev does not exist).
- */
- root->bus = pci_acpi_scan_root(root);
- if (!root->bus) {
- printk(KERN_ERR PREFIX
- "Bus %04x:%02x not present in PCI namespace\n",
- root->segment, (unsigned int)root->secondary.start);
- result = -ENODEV;
- goto out_del_root;
- }
-
/* Indicate support for various _OSC capabilities. */
if (pci_ext_cfg_avail())
flags |= OSC_EXT_PCI_CONFIG_SUPPORT;
@@ -517,7 +494,6 @@ static int acpi_pci_root_add(struct acpi_device *device,
flags = base_flags;
}
}
-
if (!pcie_ports_disabled
&& (flags & ACPI_PCIE_REQ_SUPPORT) == ACPI_PCIE_REQ_SUPPORT) {
flags = OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL
@@ -538,28 +514,54 @@ static int acpi_pci_root_add(struct acpi_device *device,
status = acpi_pci_osc_control_set(device->handle, &flags,
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL);
if (ACPI_SUCCESS(status)) {
+ is_osc_granted = true;
dev_info(&device->dev,
"ACPI _OSC control (0x%02x) granted\n", flags);
- if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM) {
- /*
- * We have ASPM control, but the FADT indicates
- * that it's unsupported. Clear it.
- */
- pcie_clear_aspm(root->bus);
- }
} else {
+ is_osc_granted = false;
dev_info(&device->dev,
"ACPI _OSC request failed (%s), "
"returned control mask: 0x%02x\n",
acpi_format_exception(status), flags);
- pr_info("ACPI _OSC control for PCIe not granted, "
- "disabling ASPM\n");
- pcie_no_aspm();
}
} else {
dev_info(&device->dev,
- "Unable to request _OSC control "
- "(_OSC support mask: 0x%02x)\n", flags);
+ "Unable to request _OSC control "
+ "(_OSC support mask: 0x%02x)\n", flags);
+ }
+
+ /*
+ * TBD: Need PCI interface for enumeration/configuration of roots.
+ */
+
+ mutex_lock(&acpi_pci_root_lock);
+ list_add_tail(&root->node, &acpi_pci_roots);
+ mutex_unlock(&acpi_pci_root_lock);
+
+ /*
+ * Scan the Root Bridge
+ * --------------------
+ * Must do this prior to any attempt to bind the root device, as the
+ * PCI namespace does not get created until this call is made (and
+ * thus the root bridge's pci_dev does not exist).
+ */
+ root->bus = pci_acpi_scan_root(root);
+ if (!root->bus) {
+ printk(KERN_ERR PREFIX
+ "Bus %04x:%02x not present in PCI namespace\n",
+ root->segment, (unsigned int)root->secondary.start);
+ result = -ENODEV;
+ goto out_del_root;
+ }
+
+ /* ASPM setting */
+ if (is_osc_granted) {
+ if (acpi_gbl_FADT.boot_flags & ACPI_FADT_NO_ASPM)
+ pcie_clear_aspm(root->bus);
+ } else {
+ pr_info("ACPI _OSC control for PCIe not granted, "
+ "disabling ASPM\n");
+ pcie_no_aspm();
}
pci_acpi_add_bus_pm_notifier(device, root->bus);
diff --git a/trunk/drivers/acpi/processor_idle.c b/trunk/drivers/acpi/processor_idle.c
index ee255c60bdac..fc95308e9a11 100644
--- a/trunk/drivers/acpi/processor_idle.c
+++ b/trunk/drivers/acpi/processor_idle.c
@@ -66,8 +66,7 @@ module_param(latency_factor, uint, 0644);
static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
-static DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX],
- acpi_cstate);
+static struct acpi_processor_cx *acpi_cstate[CPUIDLE_STATE_MAX];
static int disabled_by_idle_boot_param(void)
{
@@ -723,7 +722,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -746,7 +745,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
*/
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
- struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
ACPI_FLUSH_CPU_CACHE();
@@ -776,7 +775,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -834,7 +833,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
struct cpuidle_driver *drv, int index)
{
struct acpi_processor *pr;
- struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
+ struct acpi_processor_cx *cx = acpi_cstate[index];
pr = __this_cpu_read(processors);
@@ -961,7 +960,7 @@ static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
continue;
#endif
- per_cpu(acpi_cstate[count], dev->cpu) = cx;
+ acpi_cstate[count] = cx;
count++;
if (count == CPUIDLE_STATE_MAX)
diff --git a/trunk/drivers/ata/ata_piix.c b/trunk/drivers/ata/ata_piix.c
index 2f48123d74c4..ffdd32d22602 100644
--- a/trunk/drivers/ata/ata_piix.c
+++ b/trunk/drivers/ata/ata_piix.c
@@ -150,7 +150,6 @@ enum piix_controller_ids {
tolapai_sata,
piix_pata_vmw, /* PIIX4 for VMware, spurious DMA_ERR */
ich8_sata_snb,
- ich8_2port_sata_snb,
};
struct piix_map_db {
@@ -305,7 +304,7 @@ static const struct pci_device_id piix_pci_tbl[] = {
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c01, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata_snb },
/* SATA Controller IDE (Lynx Point) */
- { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata_snb },
+ { 0x8086, 0x8c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point) */
{ 0x8086, 0x8c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
/* SATA Controller IDE (Lynx Point-LP) */
@@ -440,7 +439,6 @@ static const struct piix_map_db *piix_map_db_table[] = {
[ich8m_apple_sata] = &ich8m_apple_map_db,
[tolapai_sata] = &tolapai_map_db,
[ich8_sata_snb] = &ich8_map_db,
- [ich8_2port_sata_snb] = &ich8_2port_map_db,
};
static struct pci_bits piix_enable_bits[] = {
@@ -1244,16 +1242,6 @@ static struct ata_port_info piix_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &piix_sata_ops,
},
-
- [ich8_2port_sata_snb] =
- {
- .flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR
- | PIIX_FLAG_PIO16,
- .pio_mask = ATA_PIO4,
- .mwdma_mask = ATA_MWDMA2,
- .udma_mask = ATA_UDMA6,
- .port_ops = &piix_sata_ops,
- },
};
#define AHCI_PCI_BAR 5
diff --git a/trunk/drivers/ata/libata-core.c b/trunk/drivers/ata/libata-core.c
index 63c743baf920..497adea1f0d6 100644
--- a/trunk/drivers/ata/libata-core.c
+++ b/trunk/drivers/ata/libata-core.c
@@ -2329,7 +2329,7 @@ int ata_dev_configure(struct ata_device *dev)
* from SATA Settings page of Identify Device Data Log.
*/
if (ata_id_has_devslp(dev->id)) {
- u8 *sata_setting = ap->sector_buf;
+ u8 sata_setting[ATA_SECT_SIZE];
int i, j;
dev->flags |= ATA_DFLAG_DEVSLP;
@@ -2439,9 +2439,6 @@ int ata_dev_configure(struct ata_device *dev)
dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
dev->max_sectors);
- if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
- dev->max_sectors = ATA_MAX_SECTORS_LBA48;
-
if (ap->ops->dev_config)
ap->ops->dev_config(dev);
@@ -4103,7 +4100,6 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Weird ATAPI devices */
{ "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
{ "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA },
- { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
/* Devices we expect to fail diagnostics */
diff --git a/trunk/drivers/ata/libata-scsi.c b/trunk/drivers/ata/libata-scsi.c
index ff44787e5a45..318b41358187 100644
--- a/trunk/drivers/ata/libata-scsi.c
+++ b/trunk/drivers/ata/libata-scsi.c
@@ -532,8 +532,8 @@ int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- if (sshdr.sense_key == RECOVERED_ERROR &&
- sshdr.asc == 0 && sshdr.ascq == 0x1d)
+ if (sshdr.sense_key == 0 &&
+ sshdr.asc == 0 && sshdr.ascq == 0)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
@@ -618,8 +618,8 @@ int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
struct scsi_sense_hdr sshdr;
scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
&sshdr);
- if (sshdr.sense_key == RECOVERED_ERROR &&
- sshdr.asc == 0 && sshdr.ascq == 0x1d)
+ if (sshdr.sense_key == 0 &&
+ sshdr.asc == 0 && sshdr.ascq == 0)
cmd_result &= ~SAM_STAT_CHECK_CONDITION;
}
diff --git a/trunk/drivers/base/power/qos.c b/trunk/drivers/base/power/qos.c
index 71671c42ef45..5f74587ef258 100644
--- a/trunk/drivers/base/power/qos.c
+++ b/trunk/drivers/base/power/qos.c
@@ -46,7 +46,6 @@
#include "power.h"
static DEFINE_MUTEX(dev_pm_qos_mtx);
-static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);
static BLOCKING_NOTIFIER_HEAD(dev_pm_notifiers);
@@ -217,17 +216,12 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
struct pm_qos_constraints *c;
struct pm_qos_flags *f;
- mutex_lock(&dev_pm_qos_sysfs_mtx);
+ mutex_lock(&dev_pm_qos_mtx);
/*
* If the device's PM QoS resume latency limit or PM QoS flags have been
* exposed to user space, they have to be hidden at this point.
*/
- pm_qos_sysfs_remove_latency(dev);
- pm_qos_sysfs_remove_flags(dev);
-
- mutex_lock(&dev_pm_qos_mtx);
-
__dev_pm_qos_hide_latency_limit(dev);
__dev_pm_qos_hide_flags(dev);
@@ -260,8 +254,6 @@ void dev_pm_qos_constraints_destroy(struct device *dev)
out:
mutex_unlock(&dev_pm_qos_mtx);
-
- mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
/**
@@ -566,14 +558,6 @@ static void __dev_pm_qos_drop_user_request(struct device *dev,
kfree(req);
}
-static void dev_pm_qos_drop_user_request(struct device *dev,
- enum dev_pm_qos_req_type type)
-{
- mutex_lock(&dev_pm_qos_mtx);
- __dev_pm_qos_drop_user_request(dev, type);
- mutex_unlock(&dev_pm_qos_mtx);
-}
-
/**
* dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
* @dev: Device whose PM QoS latency limit is to be exposed to user space.
@@ -597,8 +581,6 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
return ret;
}
- mutex_lock(&dev_pm_qos_sysfs_mtx);
-
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
@@ -609,27 +591,26 @@ int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
- mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
- dev->power.qos->latency_req = req;
-
- mutex_unlock(&dev_pm_qos_mtx);
+ dev->power.qos->latency_req = req;
ret = pm_qos_sysfs_add_latency(dev);
if (ret)
- dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
out:
- mutex_unlock(&dev_pm_qos_sysfs_mtx);
+ mutex_unlock(&dev_pm_qos_mtx);
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req)
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->latency_req) {
+ pm_qos_sysfs_remove_latency(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY);
+ }
}
/**
@@ -638,15 +619,9 @@ static void __dev_pm_qos_hide_latency_limit(struct device *dev)
*/
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
- mutex_lock(&dev_pm_qos_sysfs_mtx);
-
- pm_qos_sysfs_remove_latency(dev);
-
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_latency_limit(dev);
mutex_unlock(&dev_pm_qos_mtx);
-
- mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);
@@ -674,8 +649,6 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
}
pm_runtime_get_sync(dev);
- mutex_lock(&dev_pm_qos_sysfs_mtx);
-
mutex_lock(&dev_pm_qos_mtx);
if (IS_ERR_OR_NULL(dev->power.qos))
@@ -686,19 +659,16 @@ int dev_pm_qos_expose_flags(struct device *dev, s32 val)
if (ret < 0) {
__dev_pm_qos_remove_request(req);
kfree(req);
- mutex_unlock(&dev_pm_qos_mtx);
goto out;
}
- dev->power.qos->flags_req = req;
-
- mutex_unlock(&dev_pm_qos_mtx);
+ dev->power.qos->flags_req = req;
ret = pm_qos_sysfs_add_flags(dev);
if (ret)
- dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ __dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
out:
- mutex_unlock(&dev_pm_qos_sysfs_mtx);
+ mutex_unlock(&dev_pm_qos_mtx);
pm_runtime_put(dev);
return ret;
}
@@ -706,8 +676,10 @@ EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);
static void __dev_pm_qos_hide_flags(struct device *dev)
{
- if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
+ if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req) {
+ pm_qos_sysfs_remove_flags(dev);
__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
+ }
}
/**
@@ -717,15 +689,9 @@ static void __dev_pm_qos_hide_flags(struct device *dev)
void dev_pm_qos_hide_flags(struct device *dev)
{
pm_runtime_get_sync(dev);
- mutex_lock(&dev_pm_qos_sysfs_mtx);
-
- pm_qos_sysfs_remove_flags(dev);
-
mutex_lock(&dev_pm_qos_mtx);
__dev_pm_qos_hide_flags(dev);
mutex_unlock(&dev_pm_qos_mtx);
-
- mutex_unlock(&dev_pm_qos_sysfs_mtx);
pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);
diff --git a/trunk/drivers/base/regmap/regcache-rbtree.c b/trunk/drivers/base/regmap/regcache-rbtree.c
index 79f4fca9877a..e6732cf7c06e 100644
--- a/trunk/drivers/base/regmap/regcache-rbtree.c
+++ b/trunk/drivers/base/regmap/regcache-rbtree.c
@@ -398,7 +398,7 @@ static int regcache_rbtree_sync(struct regmap *map, unsigned int min,
base = 0;
if (max < rbnode->base_reg + rbnode->blklen)
- end = max - rbnode->base_reg + 1;
+ end = rbnode->base_reg + rbnode->blklen - max;
else
end = rbnode->blklen;
diff --git a/trunk/drivers/base/regmap/regmap.c b/trunk/drivers/base/regmap/regmap.c
index 58cfb3232428..3d2367501fd0 100644
--- a/trunk/drivers/base/regmap/regmap.c
+++ b/trunk/drivers/base/regmap/regmap.c
@@ -710,12 +710,12 @@ struct regmap *regmap_init(struct device *dev,
}
}
- regmap_debugfs_init(map, config->name);
-
ret = regcache_init(map, config);
if (ret != 0)
goto err_range;
+ regmap_debugfs_init(map, config->name);
+
/* Add a devres resource for dev_get_regmap() */
m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
if (!m) {
@@ -1036,8 +1036,6 @@ static int _regmap_raw_write(struct regmap *map, unsigned int reg,
kfree(async->work_buf);
kfree(async);
}
-
- return ret;
}
trace_regmap_hw_write_start(map->dev, reg,
diff --git a/trunk/drivers/block/aoe/aoecmd.c b/trunk/drivers/block/aoe/aoecmd.c
index 92b6d7c51e39..25ef5c014fca 100644
--- a/trunk/drivers/block/aoe/aoecmd.c
+++ b/trunk/drivers/block/aoe/aoecmd.c
@@ -51,9 +51,8 @@ new_skb(ulong len)
{
struct sk_buff *skb;
- skb = alloc_skb(len + MAX_HEADER, GFP_ATOMIC);
+ skb = alloc_skb(len, GFP_ATOMIC);
if (skb) {
- skb_reserve(skb, MAX_HEADER);
skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb->protocol = __constant_htons(ETH_P_AOE);
diff --git a/trunk/drivers/block/loop.c b/trunk/drivers/block/loop.c
index dfe758382eaf..fe5f6403417f 100644
--- a/trunk/drivers/block/loop.c
+++ b/trunk/drivers/block/loop.c
@@ -922,11 +922,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
lo->lo_flags |= LO_FLAGS_PARTSCAN;
if (lo->lo_flags & LO_FLAGS_PARTSCAN)
ioctl_by_bdev(bdev, BLKRRPART, 0);
-
- /* Grab the block_device to prevent its destruction after we
- * put /dev/loopXX inode. Later in loop_clr_fd() we bdput(bdev).
- */
- bdgrab(bdev);
return 0;
out_clr:
@@ -1036,10 +1031,8 @@ static int loop_clr_fd(struct loop_device *lo)
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
- if (bdev) {
- bdput(bdev);
+ if (bdev)
invalidate_bdev(bdev);
- }
set_capacity(lo->lo_disk, 0);
loop_sysfs_exit(lo);
if (bdev) {
@@ -1051,12 +1044,29 @@ static int loop_clr_fd(struct loop_device *lo)
lo->lo_state = Lo_unbound;
/* This is safe: open() is still holding a reference. */
module_put(THIS_MODULE);
- if (lo->lo_flags & LO_FLAGS_PARTSCAN && bdev)
- ioctl_by_bdev(bdev, BLKRRPART, 0);
lo->lo_flags = 0;
if (!part_shift)
lo->lo_disk->flags |= GENHD_FL_NO_PART_SCAN;
mutex_unlock(&lo->lo_ctl_mutex);
+
+ /*
+ * Remove all partitions, since BLKRRPART won't remove user
+ * added partitions when max_part=0
+ */
+ if (bdev) {
+ struct disk_part_iter piter;
+ struct hd_struct *part;
+
+ mutex_lock_nested(&bdev->bd_mutex, 1);
+ invalidate_partition(bdev->bd_disk, 0);
+ disk_part_iter_init(&piter, bdev->bd_disk,
+ DISK_PITER_INCL_EMPTY);
+ while ((part = disk_part_iter_next(&piter)))
+ delete_partition(bdev->bd_disk, part->partno);
+ disk_part_iter_exit(&piter);
+ mutex_unlock(&bdev->bd_mutex);
+ }
+
/*
* Need not hold lo_ctl_mutex to fput backing file.
* Calling fput holding lo_ctl_mutex triggers a circular
diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.c b/trunk/drivers/block/mtip32xx/mtip32xx.c
index 32c678028e53..92250af84e7d 100644
--- a/trunk/drivers/block/mtip32xx/mtip32xx.c
+++ b/trunk/drivers/block/mtip32xx/mtip32xx.c
@@ -81,17 +81,12 @@
/* Device instance number, incremented each time a device is probed. */
static int instance;
-struct list_head online_list;
-struct list_head removing_list;
-spinlock_t dev_lock;
-
/*
* Global variable used to hold the major block device number
* allocated in mtip_init().
*/
static int mtip_major;
static struct dentry *dfs_parent;
-static struct dentry *dfs_device_status;
static u32 cpu_use[NR_CPUS];
@@ -248,31 +243,40 @@ static inline void release_slot(struct mtip_port *port, int tag)
/*
* Reset the HBA (without sleeping)
*
+ * Just like hba_reset, except does not call sleep, so can be
+ * run from interrupt/tasklet context.
+ *
* @dd Pointer to the driver data structure.
*
* return value
* 0 The reset was successful.
* -1 The HBA Reset bit did not clear.
*/
-static int mtip_hba_reset(struct driver_data *dd)
+static int hba_reset_nosleep(struct driver_data *dd)
{
unsigned long timeout;
+ /* Chip quirk: quiesce any chip function */
+ mdelay(10);
+
/* Set the reset bit */
writel(HOST_RESET, dd->mmio + HOST_CTL);
/* Flush */
readl(dd->mmio + HOST_CTL);
- /* Spin for up to 2 seconds, waiting for reset acknowledgement */
- timeout = jiffies + msecs_to_jiffies(2000);
- do {
- mdelay(10);
- if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
- return -1;
+ /*
+ * Wait 10ms then spin for up to 1 second
+ * waiting for reset acknowledgement
+ */
+ timeout = jiffies + msecs_to_jiffies(1000);
+ mdelay(10);
+ while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
+ && time_before(jiffies, timeout))
+ mdelay(1);
- } while ((readl(dd->mmio + HOST_CTL) & HOST_RESET)
- && time_before(jiffies, timeout));
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+ return -1;
if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
return -1;
@@ -477,7 +481,7 @@ static void mtip_restart_port(struct mtip_port *port)
dev_warn(&port->dd->pdev->dev,
"PxCMD.CR not clear, escalating reset\n");
- if (mtip_hba_reset(port->dd))
+ if (hba_reset_nosleep(port->dd))
dev_err(&port->dd->pdev->dev,
"HBA reset escalation failed.\n");
@@ -523,26 +527,6 @@ static void mtip_restart_port(struct mtip_port *port)
}
-static int mtip_device_reset(struct driver_data *dd)
-{
- int rv = 0;
-
- if (mtip_check_surprise_removal(dd->pdev))
- return 0;
-
- if (mtip_hba_reset(dd) < 0)
- rv = -EFAULT;
-
- mdelay(1);
- mtip_init_port(dd->port);
- mtip_start_port(dd->port);
-
- /* Enable interrupts on the HBA. */
- writel(readl(dd->mmio + HOST_CTL) | HOST_IRQ_EN,
- dd->mmio + HOST_CTL);
- return rv;
-}
-
/*
* Helper function for tag logging
*/
@@ -648,7 +632,7 @@ static void mtip_timeout_function(unsigned long int data)
if (cmdto_cnt) {
print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
- mtip_device_reset(port->dd);
+ mtip_restart_port(port);
wake_up_interruptible(&port->svc_wait);
}
clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
@@ -1299,11 +1283,11 @@ static int mtip_exec_internal_command(struct mtip_port *port,
int rv = 0, ready2go = 1;
struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
unsigned long to;
- struct driver_data *dd = port->dd;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
- dev_err(&dd->pdev->dev, "SG buffer is not 8 byte aligned\n");
+ dev_err(&port->dd->pdev->dev,
+ "SG buffer is not 8 byte aligned\n");
return -EFAULT;
}
@@ -1316,21 +1300,23 @@ static int mtip_exec_internal_command(struct mtip_port *port,
mdelay(100);
} while (time_before(jiffies, to));
if (!ready2go) {
- dev_warn(&dd->pdev->dev,
+ dev_warn(&port->dd->pdev->dev,
"Internal cmd active. new cmd [%02X]\n", fis->command);
return -EBUSY;
}
set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
port->ic_pause_timer = 0;
- clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
- clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+ if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
+ clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
+ clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
if (atomic == GFP_KERNEL) {
if (fis->command != ATA_CMD_STANDBYNOW1) {
/* wait for io to complete if non atomic */
if (mtip_quiesce_io(port, 5000) < 0) {
- dev_warn(&dd->pdev->dev,
+ dev_warn(&port->dd->pdev->dev,
"Failed to quiesce IO\n");
release_slot(port, MTIP_TAG_INTERNAL);
clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
@@ -1375,84 +1361,58 @@ static int mtip_exec_internal_command(struct mtip_port *port,
/* Issue the command to the hardware */
mtip_issue_non_ncq_command(port, MTIP_TAG_INTERNAL);
+ /* Poll if atomic, wait_for_completion otherwise */
if (atomic == GFP_KERNEL) {
/* Wait for the command to complete or timeout. */
- if (wait_for_completion_interruptible_timeout(
+ if (wait_for_completion_timeout(
&wait,
- msecs_to_jiffies(timeout)) <= 0) {
- if (rv == -ERESTARTSYS) { /* interrupted */
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] was interrupted after %lu ms\n",
- fis->command, timeout);
- rv = -EINTR;
- goto exec_ic_exit;
- } else if (rv == 0) /* timeout */
- dev_err(&dd->pdev->dev,
- "Internal command did not complete [%02X] within timeout of %lu ms\n",
- fis->command, timeout);
- else
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned code [%d] after %lu ms - unhandled\n",
- fis->command, rv, timeout);
-
- if (mtip_check_surprise_removal(dd->pdev) ||
+ msecs_to_jiffies(timeout)) == 0) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command did not complete [%d] "
+ "within timeout of %lu ms\n",
+ atomic, timeout);
+ if (mtip_check_surprise_removal(port->dd->pdev) ||
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag)) {
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] wait returned due to SR\n",
- fis->command);
+ &port->dd->dd_flag)) {
rv = -ENXIO;
goto exec_ic_exit;
}
- mtip_device_reset(dd); /* recover from timeout issue */
rv = -EAGAIN;
- goto exec_ic_exit;
}
} else {
- u32 hba_stat, port_stat;
-
/* Spin for checking if command still outstanding */
timeout = jiffies + msecs_to_jiffies(timeout);
while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL))
&& time_before(jiffies, timeout)) {
- if (mtip_check_surprise_removal(dd->pdev)) {
+ if (mtip_check_surprise_removal(port->dd->pdev)) {
rv = -ENXIO;
goto exec_ic_exit;
}
if ((fis->command != ATA_CMD_STANDBYNOW1) &&
test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
- &dd->dd_flag)) {
+ &port->dd->dd_flag)) {
rv = -ENXIO;
goto exec_ic_exit;
}
- port_stat = readl(port->mmio + PORT_IRQ_STAT);
- if (!port_stat)
- continue;
-
- if (port_stat & PORT_IRQ_ERR) {
- dev_err(&dd->pdev->dev,
- "Internal command [%02X] failed\n",
- fis->command);
- mtip_device_reset(dd);
- rv = -EIO;
- goto exec_ic_exit;
- } else {
- writel(port_stat, port->mmio + PORT_IRQ_STAT);
- hba_stat = readl(dd->mmio + HOST_IRQ_STAT);
- if (hba_stat)
- writel(hba_stat,
- dd->mmio + HOST_IRQ_STAT);
+ if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
+ atomic_inc(&int_cmd->active); /* error */
+ break;
}
- break;
}
}
+ if (atomic_read(&int_cmd->active) > 1) {
+ dev_err(&port->dd->pdev->dev,
+ "Internal command [%02X] failed\n", fis->command);
+ rv = -EIO;
+ }
if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL)) {
rv = -ENXIO;
- if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
- mtip_device_reset(dd);
+ if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ mtip_restart_port(port);
rv = -EAGAIN;
}
}
@@ -1764,8 +1724,7 @@ static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
* -EINVAL Invalid parameters passed in, trim not supported
* -EIO Error submitting trim request to hw
*/
-static int mtip_send_trim(struct driver_data *dd, unsigned int lba,
- unsigned int len)
+static int mtip_send_trim(struct driver_data *dd, unsigned int lba, unsigned int len)
{
int i, rv = 0;
u64 tlba, tlen, sect_left;
@@ -1851,6 +1810,45 @@ static bool mtip_hw_get_capacity(struct driver_data *dd, sector_t *sectors)
return (bool) !!port->identify_valid;
}
+/*
+ * Reset the HBA.
+ *
+ * Resets the HBA by setting the HBA Reset bit in the Global
+ * HBA Control register. After setting the HBA Reset bit the
+ * function waits for 1 second before reading the HBA Reset
+ * bit to make sure it has cleared. If HBA Reset is not clear
+ * an error is returned. Cannot be used in non-blockable
+ * context.
+ *
+ * @dd Pointer to the driver data structure.
+ *
+ * return value
+ * 0 The reset was successful.
+ * -1 The HBA Reset bit did not clear.
+ */
+static int mtip_hba_reset(struct driver_data *dd)
+{
+ mtip_deinit_port(dd->port);
+
+ /* Set the reset bit */
+ writel(HOST_RESET, dd->mmio + HOST_CTL);
+
+ /* Flush */
+ readl(dd->mmio + HOST_CTL);
+
+ /* Wait for reset to clear */
+ ssleep(1);
+
+ /* Check the bit has cleared */
+ if (readl(dd->mmio + HOST_CTL) & HOST_RESET) {
+ dev_err(&dd->pdev->dev,
+ "Reset bit did not clear.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
/*
* Display the identify command data.
*
@@ -2712,100 +2710,6 @@ static ssize_t mtip_hw_show_status(struct device *dev,
static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
-/* debugsfs entries */
-
-static ssize_t show_device_status(struct device_driver *drv, char *buf)
-{
- int size = 0;
- struct driver_data *dd, *tmp;
- unsigned long flags;
- char id_buf[42];
- u16 status = 0;
-
- spin_lock_irqsave(&dev_lock, flags);
- size += sprintf(&buf[size], "Devices Present:\n");
- list_for_each_entry_safe(dd, tmp, &online_list, online_list) {
- if (dd->pdev) {
- if (dd->port &&
- dd->port->identify &&
- dd->port->identify_valid) {
- strlcpy(id_buf,
- (char *) (dd->port->identify + 10), 21);
- status = *(dd->port->identify + 141);
- } else {
- memset(id_buf, 0, 42);
- status = 0;
- }
-
- if (dd->port &&
- test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
- size += sprintf(&buf[size],
- " device %s %s (ftl rebuild %d %%)\n",
- dev_name(&dd->pdev->dev),
- id_buf,
- status);
- } else {
- size += sprintf(&buf[size],
- " device %s %s\n",
- dev_name(&dd->pdev->dev),
- id_buf);
- }
- }
- }
-
- size += sprintf(&buf[size], "Devices Being Removed:\n");
- list_for_each_entry_safe(dd, tmp, &removing_list, remove_list) {
- if (dd->pdev) {
- if (dd->port &&
- dd->port->identify &&
- dd->port->identify_valid) {
- strlcpy(id_buf,
- (char *) (dd->port->identify+10), 21);
- status = *(dd->port->identify + 141);
- } else {
- memset(id_buf, 0, 42);
- status = 0;
- }
-
- if (dd->port &&
- test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags)) {
- size += sprintf(&buf[size],
- " device %s %s (ftl rebuild %d %%)\n",
- dev_name(&dd->pdev->dev),
- id_buf,
- status);
- } else {
- size += sprintf(&buf[size],
- " device %s %s\n",
- dev_name(&dd->pdev->dev),
- id_buf);
- }
- }
- }
- spin_unlock_irqrestore(&dev_lock, flags);
-
- return size;
-}
-
-static ssize_t mtip_hw_read_device_status(struct file *f, char __user *ubuf,
- size_t len, loff_t *offset)
-{
- int size = *offset;
- char buf[MTIP_DFS_MAX_BUF_SIZE];
-
- if (!len || *offset)
- return 0;
-
- size += show_device_status(NULL, buf);
-
- *offset = size <= len ? size : len;
- size = copy_to_user(ubuf, buf, *offset);
- if (size)
- return -EFAULT;
-
- return *offset;
-}
-
static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf,
size_t len, loff_t *offset)
{
@@ -2900,13 +2804,6 @@ static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf,
return *offset;
}
-static const struct file_operations mtip_device_status_fops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .read = mtip_hw_read_device_status,
- .llseek = no_llseek,
-};
-
static const struct file_operations mtip_regs_fops = {
.owner = THIS_MODULE,
.open = simple_open,
@@ -4264,7 +4161,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
const struct cpumask *node_mask;
int cpu, i = 0, j = 0;
int my_node = NUMA_NO_NODE;
- unsigned long flags;
/* Allocate memory for this devices private data. */
my_node = pcibus_to_node(pdev->bus);
@@ -4322,9 +4218,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
dd->pdev = pdev;
dd->numa_node = my_node;
- INIT_LIST_HEAD(&dd->online_list);
- INIT_LIST_HEAD(&dd->remove_list);
-
memset(dd->workq_name, 0, 32);
snprintf(dd->workq_name, 31, "mtipq%d", dd->instance);
@@ -4412,14 +4305,6 @@ static int mtip_pci_probe(struct pci_dev *pdev,
instance++;
if (rv != MTIP_FTL_REBUILD_MAGIC)
set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
- else
- rv = 0; /* device in rebuild state, return 0 from probe */
-
- /* Add to online list even if in ftl rebuild */
- spin_lock_irqsave(&dev_lock, flags);
- list_add(&dd->online_list, &online_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
goto done;
block_initialize_err:
@@ -4453,15 +4338,9 @@ static void mtip_pci_remove(struct pci_dev *pdev)
{
struct driver_data *dd = pci_get_drvdata(pdev);
int counter = 0;
- unsigned long flags;
set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
- spin_lock_irqsave(&dev_lock, flags);
- list_del_init(&dd->online_list);
- list_add(&dd->remove_list, &removing_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
if (mtip_check_surprise_removal(pdev)) {
while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
counter++;
@@ -4487,10 +4366,6 @@ static void mtip_pci_remove(struct pci_dev *pdev)
pci_disable_msi(pdev);
- spin_lock_irqsave(&dev_lock, flags);
- list_del_init(&dd->remove_list);
- spin_unlock_irqrestore(&dev_lock, flags);
-
kfree(dd);
pcim_iounmap_regions(pdev, 1 << MTIP_ABAR);
}
@@ -4638,11 +4513,6 @@ static int __init mtip_init(void)
pr_info(MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
- spin_lock_init(&dev_lock);
-
- INIT_LIST_HEAD(&online_list);
- INIT_LIST_HEAD(&removing_list);
-
/* Allocate a major block device number to use with this driver. */
error = register_blkdev(0, MTIP_DRV_NAME);
if (error <= 0) {
@@ -4652,18 +4522,11 @@ static int __init mtip_init(void)
}
mtip_major = error;
- dfs_parent = debugfs_create_dir("rssd", NULL);
- if (IS_ERR_OR_NULL(dfs_parent)) {
- pr_warn("Error creating debugfs parent\n");
- dfs_parent = NULL;
- }
- if (dfs_parent) {
- dfs_device_status = debugfs_create_file("device_status",
- S_IRUGO, dfs_parent, NULL,
- &mtip_device_status_fops);
- if (IS_ERR_OR_NULL(dfs_device_status)) {
- pr_err("Error creating device_status node\n");
- dfs_device_status = NULL;
+ if (!dfs_parent) {
+ dfs_parent = debugfs_create_dir("rssd", NULL);
+ if (IS_ERR_OR_NULL(dfs_parent)) {
+ pr_warn("Error creating debugfs parent\n");
+ dfs_parent = NULL;
}
}
diff --git a/trunk/drivers/block/mtip32xx/mtip32xx.h b/trunk/drivers/block/mtip32xx/mtip32xx.h
index 8e8334c9dd0f..3bffff5f670c 100644
--- a/trunk/drivers/block/mtip32xx/mtip32xx.h
+++ b/trunk/drivers/block/mtip32xx/mtip32xx.h
@@ -129,9 +129,9 @@ enum {
MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
- MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) |
- (1 << MTIP_PF_EH_ACTIVE_BIT) |
- (1 << MTIP_PF_SE_ACTIVE_BIT) |
+ MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+ (1 << MTIP_PF_EH_ACTIVE_BIT) | \
+ (1 << MTIP_PF_SE_ACTIVE_BIT) | \
(1 << MTIP_PF_DM_ACTIVE_BIT)),
MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
@@ -144,9 +144,9 @@ enum {
MTIP_DDF_REMOVE_PENDING_BIT = 1,
MTIP_DDF_OVER_TEMP_BIT = 2,
MTIP_DDF_WRITE_PROTECT_BIT = 3,
- MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) |
- (1 << MTIP_DDF_SEC_LOCK_BIT) |
- (1 << MTIP_DDF_OVER_TEMP_BIT) |
+ MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+ (1 << MTIP_DDF_SEC_LOCK_BIT) | \
+ (1 << MTIP_DDF_OVER_TEMP_BIT) | \
(1 << MTIP_DDF_WRITE_PROTECT_BIT)),
MTIP_DDF_CLEANUP_BIT = 5,
@@ -180,7 +180,7 @@ struct mtip_work {
#define MTIP_TRIM_TIMEOUT_MS 240000
#define MTIP_MAX_TRIM_ENTRIES 8
-#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
+#define MTIP_MAX_TRIM_ENTRY_LEN 0xfff8
struct mtip_trim_entry {
u32 lba; /* starting lba of region */
@@ -501,10 +501,6 @@ struct driver_data {
atomic_t irq_workers_active;
int isr_binding;
-
- struct list_head online_list; /* linkage for online list */
-
- struct list_head remove_list; /* linkage for removing list */
};
#endif
diff --git a/trunk/drivers/block/rbd.c b/trunk/drivers/block/rbd.c
index b7b7a88d9f68..f556f8a8b3f9 100644
--- a/trunk/drivers/block/rbd.c
+++ b/trunk/drivers/block/rbd.c
@@ -1742,10 +1742,9 @@ static int rbd_img_request_submit(struct rbd_img_request *img_request)
struct rbd_device *rbd_dev = img_request->rbd_dev;
struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
struct rbd_obj_request *obj_request;
- struct rbd_obj_request *next_obj_request;
dout("%s: img %p\n", __func__, img_request);
- for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
+ for_each_obj_request(img_request, obj_request) {
int ret;
obj_request->callback = rbd_img_obj_callback;
diff --git a/trunk/drivers/char/hpet.c b/trunk/drivers/char/hpet.c
index d784650d14f0..e3f9a99b8522 100644
--- a/trunk/drivers/char/hpet.c
+++ b/trunk/drivers/char/hpet.c
@@ -373,14 +373,26 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma)
struct hpet_dev *devp;
unsigned long addr;
+ if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff)
+ return -EINVAL;
+
devp = file->private_data;
addr = devp->hd_hpets->hp_hpet_phys;
if (addr & (PAGE_SIZE - 1))
return -ENOSYS;
+ vma->vm_flags |= VM_IO;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, addr, PAGE_SIZE);
+
+ if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot)) {
+ printk(KERN_ERR "%s: io_remap_pfn_range failed\n",
+ __func__);
+ return -EAGAIN;
+ }
+
+ return 0;
#else
return -ENOSYS;
#endif
diff --git a/trunk/drivers/char/hw_random/core.c b/trunk/drivers/char/hw_random/core.c
index a0f7724852eb..69ae5972713c 100644
--- a/trunk/drivers/char/hw_random/core.c
+++ b/trunk/drivers/char/hw_random/core.c
@@ -380,15 +380,6 @@ void hwrng_unregister(struct hwrng *rng)
}
EXPORT_SYMBOL_GPL(hwrng_unregister);
-static void __exit hwrng_exit(void)
-{
- mutex_lock(&rng_mutex);
- BUG_ON(current_rng);
- kfree(rng_buffer);
- mutex_unlock(&rng_mutex);
-}
-
-module_exit(hwrng_exit);
MODULE_DESCRIPTION("H/W Random Number Generator (RNG) driver");
MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/char/virtio_console.c b/trunk/drivers/char/virtio_console.c
index ce5f3fc25d6d..e905d5f53051 100644
--- a/trunk/drivers/char/virtio_console.c
+++ b/trunk/drivers/char/virtio_console.c
@@ -149,8 +149,7 @@ struct ports_device {
spinlock_t ports_lock;
/* To protect the vq operations for the control channel */
- spinlock_t c_ivq_lock;
- spinlock_t c_ovq_lock;
+ spinlock_t cvq_lock;
/* The current config space is stored here */
struct virtio_console_config config;
@@ -570,14 +569,11 @@ static ssize_t __send_control_msg(struct ports_device *portdev, u32 port_id,
vq = portdev->c_ovq;
sg_init_one(sg, &cpkt, sizeof(cpkt));
-
- spin_lock(&portdev->c_ovq_lock);
if (virtqueue_add_buf(vq, sg, 1, 0, &cpkt, GFP_ATOMIC) == 0) {
virtqueue_kick(vq);
while (!virtqueue_get_buf(vq, &len))
cpu_relax();
}
- spin_unlock(&portdev->c_ovq_lock);
return 0;
}
@@ -1440,7 +1436,7 @@ static int add_port(struct ports_device *portdev, u32 id)
* rproc_serial does not want the console port, only
* the generic port implementation.
*/
- port->host_connected = true;
+ port->host_connected = port->guest_connected = true;
else if (!use_multiport(port->portdev)) {
/*
* If we're not using multiport support,
@@ -1713,23 +1709,23 @@ static void control_work_handler(struct work_struct *work)
portdev = container_of(work, struct ports_device, control_work);
vq = portdev->c_ivq;
- spin_lock(&portdev->c_ivq_lock);
+ spin_lock(&portdev->cvq_lock);
while ((buf = virtqueue_get_buf(vq, &len))) {
- spin_unlock(&portdev->c_ivq_lock);
+ spin_unlock(&portdev->cvq_lock);
buf->len = len;
buf->offset = 0;
handle_control_message(portdev, buf);
- spin_lock(&portdev->c_ivq_lock);
+ spin_lock(&portdev->cvq_lock);
if (add_inbuf(portdev->c_ivq, buf) < 0) {
dev_warn(&portdev->vdev->dev,
"Error adding buffer to queue\n");
free_buf(buf, false);
}
}
- spin_unlock(&portdev->c_ivq_lock);
+ spin_unlock(&portdev->cvq_lock);
}
static void out_intr(struct virtqueue *vq)
@@ -1756,23 +1752,13 @@ static void in_intr(struct virtqueue *vq)
port->inbuf = get_inbuf(port);
/*
- * Normally the port should not accept data when the port is
- * closed. For generic serial ports, the host won't (shouldn't)
- * send data till the guest is connected. But this condition
+ * Don't queue up data when port is closed. This condition
* can be reached when a console port is not yet connected (no
- * tty is spawned) and the other side sends out data over the
- * vring, or when a remote devices start sending data before
- * the ports are opened.
- *
- * A generic serial port will discard data if not connected,
- * while console ports and rproc-serial ports accepts data at
- * any time. rproc-serial is initiated with guest_connected to
- * false because port_fops_open expects this. Console ports are
- * hooked up with an HVC console and is initialized with
- * guest_connected to true.
+ * tty is spawned) and the host sends out data to console
+ * ports. For generic serial ports, the host won't
+ * (shouldn't) send data till the guest is connected.
*/
-
- if (!port->guest_connected && !is_rproc_serial(port->portdev->vdev))
+ if (!port->guest_connected)
discard_port_data(port);
spin_unlock_irqrestore(&port->inbuf_lock, flags);
@@ -2000,12 +1986,10 @@ static int virtcons_probe(struct virtio_device *vdev)
if (multiport) {
unsigned int nr_added_bufs;
- spin_lock_init(&portdev->c_ivq_lock);
- spin_lock_init(&portdev->c_ovq_lock);
+ spin_lock_init(&portdev->cvq_lock);
INIT_WORK(&portdev->control_work, &control_work_handler);
- nr_added_bufs = fill_queue(portdev->c_ivq,
- &portdev->c_ivq_lock);
+ nr_added_bufs = fill_queue(portdev->c_ivq, &portdev->cvq_lock);
if (!nr_added_bufs) {
dev_err(&vdev->dev,
"Error allocating buffers for control queue\n");
@@ -2156,7 +2140,7 @@ static int virtcons_restore(struct virtio_device *vdev)
return ret;
if (use_multiport(portdev))
- fill_queue(portdev->c_ivq, &portdev->c_ivq_lock);
+ fill_queue(portdev->c_ivq, &portdev->cvq_lock);
list_for_each_entry(port, &portdev->ports, list) {
port->in_vq = portdev->in_vqs[port->id];
diff --git a/trunk/drivers/clk/tegra/clk-tegra20.c b/trunk/drivers/clk/tegra/clk-tegra20.c
index f873dcefe0de..1e2de7305362 100644
--- a/trunk/drivers/clk/tegra/clk-tegra20.c
+++ b/trunk/drivers/clk/tegra/clk-tegra20.c
@@ -703,7 +703,7 @@ static void tegra20_pll_init(void)
clks[pll_a_out0] = clk;
/* PLLE */
- clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, pmc_base,
+ clk = tegra_clk_register_plle("pll_e", "pll_ref", clk_base, NULL,
0, 100000000, &pll_e_params,
0, pll_e_freq_table, NULL);
clk_register_clkdev(clk, "pll_e", NULL);
diff --git a/trunk/drivers/cpufreq/cpufreq-cpu0.c b/trunk/drivers/cpufreq/cpufreq-cpu0.c
index 37d23a0f8c56..4e5b7fb8927c 100644
--- a/trunk/drivers/cpufreq/cpufreq-cpu0.c
+++ b/trunk/drivers/cpufreq/cpufreq-cpu0.c
@@ -178,16 +178,10 @@ static struct cpufreq_driver cpu0_cpufreq_driver = {
static int cpu0_cpufreq_probe(struct platform_device *pdev)
{
- struct device_node *np, *parent;
+ struct device_node *np;
int ret;
- parent = of_find_node_by_path("/cpus");
- if (!parent) {
- pr_err("failed to find OF /cpus\n");
- return -ENOENT;
- }
-
- for_each_child_of_node(parent, np) {
+ for_each_child_of_node(of_find_node_by_path("/cpus"), np) {
if (of_get_property(np, "operating-points", NULL))
break;
}
diff --git a/trunk/drivers/cpufreq/cpufreq_governor.h b/trunk/drivers/cpufreq/cpufreq_governor.h
index cc4bd2f6838a..46bde01eee62 100644
--- a/trunk/drivers/cpufreq/cpufreq_governor.h
+++ b/trunk/drivers/cpufreq/cpufreq_governor.h
@@ -14,8 +14,8 @@
* published by the Free Software Foundation.
*/
-#ifndef _CPUFREQ_GOVERNOR_H
-#define _CPUFREQ_GOVERNOR_H
+#ifndef _CPUFREQ_GOVERNER_H
+#define _CPUFREQ_GOVERNER_H
#include
#include
@@ -175,4 +175,4 @@ bool need_load_eval(struct cpu_dbs_common_info *cdbs,
unsigned int sampling_rate);
int cpufreq_governor_dbs(struct dbs_data *dbs_data,
struct cpufreq_policy *policy, unsigned int event);
-#endif /* _CPUFREQ_GOVERNOR_H */
+#endif /* _CPUFREQ_GOVERNER_H */
diff --git a/trunk/drivers/cpufreq/intel_pstate.c b/trunk/drivers/cpufreq/intel_pstate.c
index 6133ef5cf671..ad72922919ed 100644
--- a/trunk/drivers/cpufreq/intel_pstate.c
+++ b/trunk/drivers/cpufreq/intel_pstate.c
@@ -502,6 +502,7 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
sample_time = cpu->pstate_policy->sample_rate_ms;
delay = msecs_to_jiffies(sample_time);
+ delay -= jiffies % delay;
mod_timer_pinned(&cpu->timer, jiffies + delay);
}
diff --git a/trunk/drivers/crypto/ux500/cryp/cryp_core.c b/trunk/drivers/crypto/ux500/cryp/cryp_core.c
index 22c9063e0120..8bc5fef07e7a 100644
--- a/trunk/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/trunk/drivers/crypto/ux500/cryp/cryp_core.c
@@ -1750,7 +1750,7 @@ static struct platform_driver cryp_driver = {
.shutdown = ux500_cryp_shutdown,
.driver = {
.owner = THIS_MODULE,
- .name = "cryp1",
+ .name = "cryp1"
.pm = &ux500_cryp_pm,
}
};
diff --git a/trunk/drivers/dma/Kconfig b/trunk/drivers/dma/Kconfig
index aeaea32bcfda..80b69971cf28 100644
--- a/trunk/drivers/dma/Kconfig
+++ b/trunk/drivers/dma/Kconfig
@@ -83,7 +83,6 @@ config INTEL_IOP_ADMA
config DW_DMAC
tristate "Synopsys DesignWare AHB DMA support"
- depends on GENERIC_HARDIRQS
select DMA_ENGINE
default y if CPU_AT32AP7000
help
diff --git a/trunk/drivers/dma/at_hdmac.c b/trunk/drivers/dma/at_hdmac.c
index 88cfc61329d2..6e13f262139a 100644
--- a/trunk/drivers/dma/at_hdmac.c
+++ b/trunk/drivers/dma/at_hdmac.c
@@ -310,6 +310,8 @@ static void atc_complete_all(struct at_dma_chan *atchan)
dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
+ BUG_ON(atc_chan_is_enabled(atchan));
+
/*
* Submit queued descriptors ASAP, i.e. before we go through
* the completed ones.
@@ -366,9 +368,6 @@ static void atc_advance_work(struct at_dma_chan *atchan)
{
dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
- if (atc_chan_is_enabled(atchan))
- return;
-
if (list_empty(&atchan->active_list) ||
list_is_singular(&atchan->active_list)) {
atc_complete_all(atchan);
@@ -1079,7 +1078,9 @@ static void atc_issue_pending(struct dma_chan *chan)
return;
spin_lock_irqsave(&atchan->lock, flags);
- atc_advance_work(atchan);
+ if (!atc_chan_is_enabled(atchan)) {
+ atc_advance_work(atchan);
+ }
spin_unlock_irqrestore(&atchan->lock, flags);
}
diff --git a/trunk/drivers/dma/omap-dma.c b/trunk/drivers/dma/omap-dma.c
index 08b43bf37158..c4b4fd2acc42 100644
--- a/trunk/drivers/dma/omap-dma.c
+++ b/trunk/drivers/dma/omap-dma.c
@@ -276,20 +276,12 @@ static void omap_dma_issue_pending(struct dma_chan *chan)
spin_lock_irqsave(&c->vc.lock, flags);
if (vchan_issue_pending(&c->vc) && !c->desc) {
- /*
- * c->cyclic is used only by audio and in this case the DMA need
- * to be started without delay.
- */
- if (!c->cyclic) {
- struct omap_dmadev *d = to_omap_dma_dev(chan->device);
- spin_lock(&d->lock);
- if (list_empty(&c->node))
- list_add_tail(&c->node, &d->pending);
- spin_unlock(&d->lock);
- tasklet_schedule(&d->task);
- } else {
- omap_dma_start_desc(c);
- }
+ struct omap_dmadev *d = to_omap_dma_dev(chan->device);
+ spin_lock(&d->lock);
+ if (list_empty(&c->node))
+ list_add_tail(&c->node, &d->pending);
+ spin_unlock(&d->lock);
+ tasklet_schedule(&d->task);
}
spin_unlock_irqrestore(&c->vc.lock, flags);
}
diff --git a/trunk/drivers/dma/pl330.c b/trunk/drivers/dma/pl330.c
index 5dbc5946c4c3..718153122759 100644
--- a/trunk/drivers/dma/pl330.c
+++ b/trunk/drivers/dma/pl330.c
@@ -2882,7 +2882,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
struct dma_pl330_platdata *pdat;
struct dma_pl330_dmac *pdmac;
- struct dma_pl330_chan *pch, *_p;
+ struct dma_pl330_chan *pch;
struct pl330_info *pi;
struct dma_device *pd;
struct resource *res;
@@ -2984,16 +2984,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
ret = dma_async_device_register(pd);
if (ret) {
dev_err(&adev->dev, "unable to register DMAC\n");
- goto probe_err3;
- }
-
- if (adev->dev.of_node) {
- ret = of_dma_controller_register(adev->dev.of_node,
- of_dma_pl330_xlate, pdmac);
- if (ret) {
- dev_err(&adev->dev,
- "unable to register DMA to the generic DT DMA helpers\n");
- }
+ goto probe_err2;
}
dev_info(&adev->dev,
@@ -3004,21 +2995,16 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pi->pcfg.data_bus_width / 8, pi->pcfg.num_chan,
pi->pcfg.num_peri, pi->pcfg.num_events);
- return 0;
-probe_err3:
- amba_set_drvdata(adev, NULL);
-
- /* Idle the DMAC */
- list_for_each_entry_safe(pch, _p, &pdmac->ddma.channels,
- chan.device_node) {
+ ret = of_dma_controller_register(adev->dev.of_node,
+ of_dma_pl330_xlate, pdmac);
+ if (ret) {
+ dev_err(&adev->dev,
+ "unable to register DMA to the generic DT DMA helpers\n");
+ goto probe_err2;
+ }
- /* Remove the channel */
- list_del(&pch->chan.device_node);
+ return 0;
- /* Flush the channel */
- pl330_control(&pch->chan, DMA_TERMINATE_ALL, 0);
- pl330_free_chan_resources(&pch->chan);
- }
probe_err2:
pl330_del(pi);
probe_err1:
@@ -3037,10 +3023,8 @@ static int pl330_remove(struct amba_device *adev)
if (!pdmac)
return 0;
- if (adev->dev.of_node)
- of_dma_controller_free(adev->dev.of_node);
+ of_dma_controller_free(adev->dev.of_node);
- dma_async_device_unregister(&pdmac->ddma);
amba_set_drvdata(adev, NULL);
/* Idle the DMAC */
diff --git a/trunk/drivers/eisa/pci_eisa.c b/trunk/drivers/eisa/pci_eisa.c
index 6c3fca97d346..cdae207028a7 100644
--- a/trunk/drivers/eisa/pci_eisa.c
+++ b/trunk/drivers/eisa/pci_eisa.c
@@ -19,10 +19,10 @@
/* There is only *one* pci_eisa device per machine, right ? */
static struct eisa_root_device pci_eisa_root;
-static int __init pci_eisa_init(struct pci_dev *pdev)
+static int __init pci_eisa_init(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
{
- int rc, i;
- struct resource *res, *bus_res = NULL;
+ int rc;
if ((rc = pci_enable_device (pdev))) {
printk (KERN_ERR "pci_eisa : Could not enable device %s\n",
@@ -30,30 +30,9 @@ static int __init pci_eisa_init(struct pci_dev *pdev)
return rc;
}
- /*
- * The Intel 82375 PCI-EISA bridge is a subtractive-decode PCI
- * device, so the resources available on EISA are the same as those
- * available on the 82375 bus. This works the same as a PCI-PCI
- * bridge in subtractive-decode mode (see pci_read_bridge_bases()).
- * We assume other PCI-EISA bridges are similar.
- *
- * eisa_root_register() can only deal with a single io port resource,
- * so we use the first valid io port resource.
- */
- pci_bus_for_each_resource(pdev->bus, res, i)
- if (res && (res->flags & IORESOURCE_IO)) {
- bus_res = res;
- break;
- }
-
- if (!bus_res) {
- dev_err(&pdev->dev, "No resources available\n");
- return -1;
- }
-
pci_eisa_root.dev = &pdev->dev;
- pci_eisa_root.res = bus_res;
- pci_eisa_root.bus_base_addr = bus_res->start;
+ pci_eisa_root.res = pdev->bus->resource[0];
+ pci_eisa_root.bus_base_addr = pdev->bus->resource[0]->start;
pci_eisa_root.slots = EISA_MAX_SLOTS;
pci_eisa_root.dma_mask = pdev->dma_mask;
dev_set_drvdata(pci_eisa_root.dev, &pci_eisa_root);
@@ -66,26 +45,22 @@ static int __init pci_eisa_init(struct pci_dev *pdev)
return 0;
}
-/*
- * We have to call pci_eisa_init_early() before pnpacpi_init()/isapnp_init().
- * Otherwise pnp resource will get enabled early and could prevent eisa
- * to be initialized.
- * Also need to make sure pci_eisa_init_early() is called after
- * x86/pci_subsys_init().
- * So need to use subsys_initcall_sync with it.
- */
-static int __init pci_eisa_init_early(void)
-{
- struct pci_dev *dev = NULL;
- int ret;
+static struct pci_device_id pci_eisa_pci_tbl[] = {
+ { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_CLASS_BRIDGE_EISA << 8, 0xffff00, 0 },
+ { 0, }
+};
- for_each_pci_dev(dev)
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_EISA) {
- ret = pci_eisa_init(dev);
- if (ret)
- return ret;
- }
+static struct pci_driver __refdata pci_eisa_driver = {
+ .name = "pci_eisa",
+ .id_table = pci_eisa_pci_tbl,
+ .probe = pci_eisa_init,
+};
- return 0;
+static int __init pci_eisa_init_module (void)
+{
+ return pci_register_driver (&pci_eisa_driver);
}
-subsys_initcall_sync(pci_eisa_init_early);
+
+device_initcall(pci_eisa_init_module);
+MODULE_DEVICE_TABLE(pci, pci_eisa_pci_tbl);
diff --git a/trunk/drivers/firmware/Kconfig b/trunk/drivers/firmware/Kconfig
index 3e532002e4d1..42c759a4d047 100644
--- a/trunk/drivers/firmware/Kconfig
+++ b/trunk/drivers/firmware/Kconfig
@@ -39,7 +39,6 @@ config FIRMWARE_MEMMAP
config EFI_VARS
tristate "EFI Variable Support via sysfs"
depends on EFI
- select UCS2_STRING
default n
help
If you say Y here, you are able to get EFI (Extensible Firmware
diff --git a/trunk/drivers/firmware/efivars.c b/trunk/drivers/firmware/efivars.c
index 182ce9471175..7acafb80fd4c 100644
--- a/trunk/drivers/firmware/efivars.c
+++ b/trunk/drivers/firmware/efivars.c
@@ -80,7 +80,6 @@
#include
#include
#include
-#include <linux/ucs2_string.h>
#include
#include
@@ -173,6 +172,51 @@ static void efivar_update_sysfs_entries(struct work_struct *);
static DECLARE_WORK(efivar_work, efivar_update_sysfs_entries);
static bool efivar_wq_enabled = true;
+/* Return the number of unicode characters in data */
+static unsigned long
+utf16_strnlen(efi_char16_t *s, size_t maxlength)
+{
+ unsigned long length = 0;
+
+ while (*s++ != 0 && length < maxlength)
+ length++;
+ return length;
+}
+
+static inline unsigned long
+utf16_strlen(efi_char16_t *s)
+{
+ return utf16_strnlen(s, ~0UL);
+}
+
+/*
+ * Return the number of bytes is the length of this string
+ * Note: this is NOT the same as the number of unicode characters
+ */
+static inline unsigned long
+utf16_strsize(efi_char16_t *data, unsigned long maxlength)
+{
+ return utf16_strnlen(data, maxlength/sizeof(efi_char16_t)) * sizeof(efi_char16_t);
+}
+
+static inline int
+utf16_strncmp(const efi_char16_t *a, const efi_char16_t *b, size_t len)
+{
+ while (1) {
+ if (len == 0)
+ return 0;
+ if (*a < *b)
+ return -1;
+ if (*a > *b)
+ return 1;
+ if (*a == 0) /* implies *b == 0 */
+ return 0;
+ a++;
+ b++;
+ len--;
+ }
+}
+
static bool
validate_device_path(struct efi_variable *var, int match, u8 *buffer,
unsigned long len)
@@ -224,7 +268,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
u16 filepathlength;
int i, desclength = 0, namelen;
- namelen = ucs2_strnlen(var->VariableName, sizeof(var->VariableName));
+ namelen = utf16_strnlen(var->VariableName, sizeof(var->VariableName));
/* Either "Boot" or "Driver" followed by four digits of hex */
for (i = match; i < match+4; i++) {
@@ -247,7 +291,7 @@ validate_load_option(struct efi_variable *var, int match, u8 *buffer,
* There's no stored length for the description, so it has to be
* found by hand
*/
- desclength = ucs2_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
+ desclength = utf16_strsize((efi_char16_t *)(buffer + 6), len - 6) + 2;
/* Each boot entry must have a descriptor */
if (!desclength)
@@ -392,12 +436,24 @@ static efi_status_t
check_var_size_locked(struct efivars *efivars, u32 attributes,
unsigned long size)
{
+ u64 storage_size, remaining_size, max_size;
+ efi_status_t status;
const struct efivar_operations *fops = efivars->ops;
- if (!efivars->ops->query_variable_store)
+ if (!efivars->ops->query_variable_info)
return EFI_UNSUPPORTED;
- return fops->query_variable_store(attributes, size);
+ status = fops->query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (!storage_size || size > remaining_size || size > max_size ||
+ (remaining_size - size) < (storage_size / 2))
+ return EFI_OUT_OF_RESOURCES;
+
+ return status;
}
@@ -537,7 +593,7 @@ efivar_store_raw(struct efivar_entry *entry, const char *buf, size_t count)
spin_lock_irq(&efivars->lock);
status = check_var_size_locked(efivars, new_var->Attributes,
- new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
+ new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
if (status == EFI_SUCCESS || status == EFI_UNSUPPORTED)
status = efivars->ops->set_variable(new_var->VariableName,
@@ -715,7 +771,7 @@ static ssize_t efivarfs_file_write(struct file *file,
* QueryVariableInfo() isn't supported by the firmware.
*/
- varsize = datasize + ucs2_strsize(var->var.VariableName, 1024);
+ varsize = datasize + utf16_strsize(var->var.VariableName, 1024);
status = check_var_size(efivars, attributes, varsize);
if (status != EFI_SUCCESS) {
@@ -1167,7 +1223,7 @@ static int efivarfs_fill_super(struct super_block *sb, void *data, int silent)
inode = NULL;
- len = ucs2_strlen(entry->var.VariableName);
+ len = utf16_strlen(entry->var.VariableName);
/* name, plus '-', plus GUID, plus NUL*/
name = kmalloc(len + 1 + GUID_LEN + 1, GFP_ATOMIC);
@@ -1425,8 +1481,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
if (efi_guidcmp(entry->var.VendorGuid, vendor))
continue;
- if (ucs2_strncmp(entry->var.VariableName, efi_name,
- ucs2_strlen(efi_name))) {
+ if (utf16_strncmp(entry->var.VariableName, efi_name,
+ utf16_strlen(efi_name))) {
/*
* Check if an old format,
* which doesn't support holding
@@ -1438,8 +1494,8 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
for (i = 0; i < DUMP_NAME_LEN; i++)
efi_name_old[i] = name_old[i];
- if (ucs2_strncmp(entry->var.VariableName, efi_name_old,
- ucs2_strlen(efi_name_old)))
+ if (utf16_strncmp(entry->var.VariableName, efi_name_old,
+ utf16_strlen(efi_name_old)))
continue;
}
@@ -1517,8 +1573,8 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = ucs2_strsize(new_var->VariableName, 1024);
+ strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = utf16_strsize(new_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
new_var->VariableName, strsize1) &&
@@ -1534,7 +1590,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
}
status = check_var_size_locked(efivars, new_var->Attributes,
- new_var->DataSize + ucs2_strsize(new_var->VariableName, 1024));
+ new_var->DataSize + utf16_strsize(new_var->VariableName, 1024));
if (status && status != EFI_UNSUPPORTED) {
spin_unlock_irq(&efivars->lock);
@@ -1558,7 +1614,7 @@ static ssize_t efivar_create(struct file *filp, struct kobject *kobj,
/* Create the entry in sysfs. Locking is not required here */
status = efivar_create_sysfs_entry(efivars,
- ucs2_strsize(new_var->VariableName,
+ utf16_strsize(new_var->VariableName,
1024),
new_var->VariableName,
&new_var->VendorGuid);
@@ -1588,8 +1644,8 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
* Does this variable already exist?
*/
list_for_each_entry_safe(search_efivar, n, &efivars->list, list) {
- strsize1 = ucs2_strsize(search_efivar->var.VariableName, 1024);
- strsize2 = ucs2_strsize(del_var->VariableName, 1024);
+ strsize1 = utf16_strsize(search_efivar->var.VariableName, 1024);
+ strsize2 = utf16_strsize(del_var->VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(&(search_efivar->var.VariableName),
del_var->VariableName, strsize1) &&
@@ -1635,9 +1691,9 @@ static bool variable_is_present(efi_char16_t *variable_name, efi_guid_t *vendor)
unsigned long strsize1, strsize2;
bool found = false;
- strsize1 = ucs2_strsize(variable_name, 1024);
+ strsize1 = utf16_strsize(variable_name, 1024);
list_for_each_entry_safe(entry, n, &efivars->list, list) {
- strsize2 = ucs2_strsize(entry->var.VariableName, 1024);
+ strsize2 = utf16_strsize(entry->var.VariableName, 1024);
if (strsize1 == strsize2 &&
!memcmp(variable_name, &(entry->var.VariableName),
strsize2) &&
@@ -2075,7 +2131,7 @@ efivars_init(void)
ops.get_variable = efi.get_variable;
ops.set_variable = efi.set_variable;
ops.get_next_variable = efi.get_next_variable;
- ops.query_variable_store = efi_query_variable_store;
+ ops.query_variable_info = efi.query_variable_info;
error = register_efivars(&__efivars, &ops, efi_kobj);
if (error)
diff --git a/trunk/drivers/gpio/gpio-ich.c b/trunk/drivers/gpio/gpio-ich.c
index de3c317bd3e2..f9dbd503fc40 100644
--- a/trunk/drivers/gpio/gpio-ich.c
+++ b/trunk/drivers/gpio/gpio-ich.c
@@ -214,7 +214,7 @@ static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
* If it can't be trusted, assume that the pin can be used as a GPIO.
*/
if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
- return 0;
+ return 1;
return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
}
diff --git a/trunk/drivers/gpio/gpio-pca953x.c b/trunk/drivers/gpio/gpio-pca953x.c
index 9391cf16e990..24059462c87f 100644
--- a/trunk/drivers/gpio/gpio-pca953x.c
+++ b/trunk/drivers/gpio/gpio-pca953x.c
@@ -575,7 +575,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip,
chip->gpio_chip.ngpio,
irq_base,
&pca953x_irq_simple_ops,
- chip);
+ NULL);
if (!chip->domain)
return -ENODEV;
diff --git a/trunk/drivers/gpio/gpio-stmpe.c b/trunk/drivers/gpio/gpio-stmpe.c
index 3ce5bc38ac31..770476a9da87 100644
--- a/trunk/drivers/gpio/gpio-stmpe.c
+++ b/trunk/drivers/gpio/gpio-stmpe.c
@@ -307,15 +307,11 @@ static const struct irq_domain_ops stmpe_gpio_irq_simple_ops = {
.xlate = irq_domain_xlate_twocell,
};
-static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio,
- struct device_node *np)
+static int stmpe_gpio_irq_init(struct stmpe_gpio *stmpe_gpio)
{
- int base = 0;
+ int base = stmpe_gpio->irq_base;
- if (!np)
- base = stmpe_gpio->irq_base;
-
- stmpe_gpio->domain = irq_domain_add_simple(np,
+ stmpe_gpio->domain = irq_domain_add_simple(NULL,
stmpe_gpio->chip.ngpio, base,
&stmpe_gpio_irq_simple_ops, stmpe_gpio);
if (!stmpe_gpio->domain) {
@@ -350,9 +346,6 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
stmpe_gpio->chip = template_chip;
stmpe_gpio->chip.ngpio = stmpe->num_gpios;
stmpe_gpio->chip.dev = &pdev->dev;
-#ifdef CONFIG_OF
- stmpe_gpio->chip.of_node = np;
-#endif
stmpe_gpio->chip.base = pdata ? pdata->gpio_base : -1;
if (pdata)
@@ -373,7 +366,7 @@ static int stmpe_gpio_probe(struct platform_device *pdev)
goto out_free;
if (irq >= 0) {
- ret = stmpe_gpio_irq_init(stmpe_gpio, np);
+ ret = stmpe_gpio_irq_init(stmpe_gpio);
if (ret)
goto out_disable;
diff --git a/trunk/drivers/gpu/drm/drm_crtc.c b/trunk/drivers/gpu/drm/drm_crtc.c
index dd64a06dc5b4..792c3e3795ca 100644
--- a/trunk/drivers/gpu/drm/drm_crtc.c
+++ b/trunk/drivers/gpu/drm/drm_crtc.c
@@ -2326,6 +2326,7 @@ int drm_mode_addfb(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, &r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
+ drm_modeset_unlock_all(dev);
return PTR_ERR(fb);
}
@@ -2505,6 +2506,7 @@ int drm_mode_addfb2(struct drm_device *dev,
fb = dev->mode_config.funcs->fb_create(dev, file_priv, r);
if (IS_ERR(fb)) {
DRM_DEBUG_KMS("could not create framebuffer\n");
+ drm_modeset_unlock_all(dev);
return PTR_ERR(fb);
}
diff --git a/trunk/drivers/gpu/drm/drm_fb_helper.c b/trunk/drivers/gpu/drm/drm_fb_helper.c
index 892ff9f95975..59d6b9bf204b 100644
--- a/trunk/drivers/gpu/drm/drm_fb_helper.c
+++ b/trunk/drivers/gpu/drm/drm_fb_helper.c
@@ -1544,10 +1544,10 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
if (!fb_helper->fb)
return 0;
- mutex_lock(&fb_helper->dev->mode_config.mutex);
+ drm_modeset_lock_all(dev);
if (!drm_fb_helper_is_bound(fb_helper)) {
fb_helper->delayed_hotplug = true;
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
+ drm_modeset_unlock_all(dev);
return 0;
}
DRM_DEBUG_KMS("\n");
@@ -1558,11 +1558,9 @@ int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
max_height);
- mutex_unlock(&fb_helper->dev->mode_config.mutex);
-
- drm_modeset_lock_all(dev);
drm_setup_crtcs(fb_helper);
drm_modeset_unlock_all(dev);
+
drm_fb_helper_set_par(fb_helper->fbdev);
return 0;
diff --git a/trunk/drivers/gpu/drm/drm_fops.c b/trunk/drivers/gpu/drm/drm_fops.c
index 429e07d0b0f1..13fdcd10a605 100644
--- a/trunk/drivers/gpu/drm/drm_fops.c
+++ b/trunk/drivers/gpu/drm/drm_fops.c
@@ -123,7 +123,6 @@ int drm_open(struct inode *inode, struct file *filp)
int retcode = 0;
int need_setup = 0;
struct address_space *old_mapping;
- struct address_space *old_imapping;
minor = idr_find(&drm_minors_idr, minor_id);
if (!minor)
@@ -138,7 +137,6 @@ int drm_open(struct inode *inode, struct file *filp)
if (!dev->open_count++)
need_setup = 1;
mutex_lock(&dev->struct_mutex);
- old_imapping = inode->i_mapping;
old_mapping = dev->dev_mapping;
if (old_mapping == NULL)
dev->dev_mapping = &inode->i_data;
@@ -161,8 +159,8 @@ int drm_open(struct inode *inode, struct file *filp)
err_undo:
mutex_lock(&dev->struct_mutex);
- filp->f_mapping = old_imapping;
- inode->i_mapping = old_imapping;
+ filp->f_mapping = old_mapping;
+ inode->i_mapping = old_mapping;
iput(container_of(dev->dev_mapping, struct inode, i_data));
dev->dev_mapping = old_mapping;
mutex_unlock(&dev->struct_mutex);
diff --git a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 9a48e1a2d417..3b11ab0fbc96 100644
--- a/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/trunk/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -57,7 +57,7 @@ eb_create(struct drm_i915_gem_execbuffer2 *args)
if (eb == NULL) {
int size = args->buffer_count;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
- BUILD_BUG_ON_NOT_POWER_OF_2(PAGE_SIZE / sizeof(struct hlist_head));
+ BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
diff --git a/trunk/drivers/gpu/drm/i915/intel_crt.c b/trunk/drivers/gpu/drm/i915/intel_crt.c
index 1ce45a0a2d3e..32a3693905ec 100644
--- a/trunk/drivers/gpu/drm/i915/intel_crt.c
+++ b/trunk/drivers/gpu/drm/i915/intel_crt.c
@@ -45,9 +45,6 @@
struct intel_crt {
struct intel_encoder base;
- /* DPMS state is stored in the connector, which we need in the
- * encoder's enable/disable callbacks */
- struct intel_connector *connector;
bool force_hotplug_required;
u32 adpa_reg;
};
@@ -84,6 +81,29 @@ static bool intel_crt_get_hw_state(struct intel_encoder *encoder,
return true;
}
+static void intel_disable_crt(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
+
+ temp = I915_READ(crt->adpa_reg);
+ temp |= ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE;
+ temp &= ~ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
+static void intel_enable_crt(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = encoder->base.dev->dev_private;
+ struct intel_crt *crt = intel_encoder_to_crt(encoder);
+ u32 temp;
+
+ temp = I915_READ(crt->adpa_reg);
+ temp |= ADPA_DAC_ENABLE;
+ I915_WRITE(crt->adpa_reg, temp);
+}
+
/* Note: The caller is required to filter out dpms modes not supported by the
* platform. */
static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -115,19 +135,6 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
I915_WRITE(crt->adpa_reg, temp);
}
-static void intel_disable_crt(struct intel_encoder *encoder)
-{
- intel_crt_set_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void intel_enable_crt(struct intel_encoder *encoder)
-{
- struct intel_crt *crt = intel_encoder_to_crt(encoder);
-
- intel_crt_set_dpms(encoder, crt->connector->base.dpms);
-}
-
-
static void intel_crt_dpms(struct drm_connector *connector, int mode)
{
struct drm_device *dev = connector->dev;
@@ -739,7 +746,6 @@ void intel_crt_init(struct drm_device *dev)
}
connector = &intel_connector->base;
- crt->connector = intel_connector;
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
diff --git a/trunk/drivers/gpu/drm/i915/intel_dp.c b/trunk/drivers/gpu/drm/i915/intel_dp.c
index 8fc93f90a7cd..d7d4afe01341 100644
--- a/trunk/drivers/gpu/drm/i915/intel_dp.c
+++ b/trunk/drivers/gpu/drm/i915/intel_dp.c
@@ -2559,15 +2559,12 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_dp *intel_dp = &intel_dig_port->dp;
- struct drm_device *dev = intel_dp_to_dev(intel_dp);
i2c_del_adapter(&intel_dp->adapter);
drm_encoder_cleanup(encoder);
if (is_edp(intel_dp)) {
cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
- mutex_lock(&dev->mode_config.mutex);
ironlake_panel_vdd_off_sync(intel_dp);
- mutex_unlock(&dev->mode_config.mutex);
}
kfree(intel_dig_port);
}
diff --git a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
index 78d8e919509f..fe22bb780e1d 100644
--- a/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/trunk/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -751,6 +751,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
int i;
unsigned char misc = 0;
unsigned char ext_vga[6];
+ unsigned char ext_vga_index24;
+ unsigned char dac_index90 = 0;
u8 bppshift;
static unsigned char dacvalue[] = {
@@ -801,6 +803,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
option2 = 0x0000b000;
break;
case G200_ER:
+ dac_index90 = 0;
break;
}
@@ -849,8 +852,10 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
WREG_DAC(i, dacvalue[i]);
}
- if (mdev->type == G200_ER)
- WREG_DAC(0x90, 0);
+ if (mdev->type == G200_ER) {
+ WREG_DAC(0x90, dac_index90);
+ }
+
if (option)
pci_write_config_dword(dev->pdev, PCI_MGA_OPTION, option);
@@ -947,6 +952,8 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
if (mdev->type == G200_WB)
ext_vga[1] |= 0x88;
+ ext_vga_index24 = 0x05;
+
/* Set pixel clocks */
misc = 0x2d;
WREG8(MGA_MISC_OUT, misc);
@@ -958,7 +965,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,
}
if (mdev->type == G200_ER)
- WREG_ECRT(0x24, 0x5);
+ WREG_ECRT(24, ext_vga_index24);
if (mdev->type == G200_EV) {
WREG_ECRT(6, 0);
diff --git a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index 0e2c1a4f1659..e816f06637a7 100644
--- a/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/trunk/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -248,22 +248,6 @@ nouveau_bios_shadow_pci(struct nouveau_bios *bios)
}
}
-static void
-nouveau_bios_shadow_platform(struct nouveau_bios *bios)
-{
- struct pci_dev *pdev = nv_device(bios)->pdev;
- size_t size;
-
- void __iomem *rom = pci_platform_rom(pdev, &size);
- if (rom && size) {
- bios->data = kmalloc(size, GFP_KERNEL);
- if (bios->data) {
- memcpy_fromio(bios->data, rom, size);
- bios->size = size;
- }
- }
-}
-
static int
nouveau_bios_score(struct nouveau_bios *bios, const bool writeable)
{
@@ -304,7 +288,6 @@ nouveau_bios_shadow(struct nouveau_bios *bios)
{ "PROM", nouveau_bios_shadow_prom, false, 0, 0, NULL },
{ "ACPI", nouveau_bios_shadow_acpi, true, 0, 0, NULL },
{ "PCIROM", nouveau_bios_shadow_pci, true, 0, 0, NULL },
- { "PLATFORM", nouveau_bios_shadow_platform, true, 0, 0, NULL },
{}
};
struct methods *mthd, *best;
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 5eb3e0da7c6e..3b6dc883e150 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -391,7 +391,7 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_device *device = nv_device(drm->device);
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_abi16_chan *chan = NULL, *temp;
+ struct nouveau_abi16_chan *chan, *temp;
struct nouveau_abi16_ntfy *ntfy;
struct nouveau_object *object;
struct nv_dma_class args = {};
@@ -404,11 +404,10 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (unlikely(nv_device(abi16->device)->card_type >= NV_C0))
return nouveau_abi16_put(abi16, -EINVAL);
- list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | info->channel)) {
- chan = temp;
+ list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+ if (chan->chan->handle == (NVDRM_CHAN | info->channel))
break;
- }
+ chan = NULL;
}
if (!chan)
@@ -460,18 +459,17 @@ nouveau_abi16_ioctl_gpuobj_free(ABI16_IOCTL_ARGS)
{
struct drm_nouveau_gpuobj_free *fini = data;
struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
- struct nouveau_abi16_chan *chan = NULL, *temp;
+ struct nouveau_abi16_chan *chan, *temp;
struct nouveau_abi16_ntfy *ntfy;
int ret;
if (unlikely(!abi16))
return -ENOMEM;
- list_for_each_entry(temp, &abi16->channels, head) {
- if (temp->chan->handle == (NVDRM_CHAN | fini->channel)) {
- chan = temp;
+ list_for_each_entry_safe(chan, temp, &abi16->channels, head) {
+ if (chan->chan->handle == (NVDRM_CHAN | fini->channel))
break;
- }
+ chan = NULL;
}
if (!chan)
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c
index c95decf543e9..d1099365bfc1 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -71,26 +71,12 @@ module_param_named(modeset, nouveau_modeset, int, 0400);
static struct drm_driver driver;
-static int
-nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
-{
- struct nouveau_drm *drm =
- container_of(event, struct nouveau_drm, vblank[head]);
- drm_handle_vblank(drm->dev, head);
- return NVKM_EVENT_KEEP;
-}
-
static int
nouveau_drm_vblank_enable(struct drm_device *dev, int head)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_disp *pdisp = nouveau_disp(drm->device);
-
- if (WARN_ON_ONCE(head > ARRAY_SIZE(drm->vblank)))
- return -EIO;
- WARN_ON_ONCE(drm->vblank[head].func);
- drm->vblank[head].func = nouveau_drm_vblank_handler;
- nouveau_event_get(pdisp->vblank, head, &drm->vblank[head]);
+ nouveau_event_get(pdisp->vblank, head, &drm->vblank);
return 0;
}
@@ -99,11 +85,16 @@ nouveau_drm_vblank_disable(struct drm_device *dev, int head)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_disp *pdisp = nouveau_disp(drm->device);
- if (drm->vblank[head].func)
- nouveau_event_put(pdisp->vblank, head, &drm->vblank[head]);
- else
- WARN_ON_ONCE(1);
- drm->vblank[head].func = NULL;
+ nouveau_event_put(pdisp->vblank, head, &drm->vblank);
+}
+
+static int
+nouveau_drm_vblank_handler(struct nouveau_eventh *event, int head)
+{
+ struct nouveau_drm *drm =
+ container_of(event, struct nouveau_drm, vblank);
+ drm_handle_vblank(drm->dev, head);
+ return NVKM_EVENT_KEEP;
}
static u64
@@ -301,6 +292,7 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = drm;
drm->dev = dev;
+ drm->vblank.func = nouveau_drm_vblank_handler;
INIT_LIST_HEAD(&drm->clients);
spin_lock_init(&drm->tile.lock);
diff --git a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h
index 9c39bafbef2c..b25df374c901 100644
--- a/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h
+++ b/trunk/drivers/gpu/drm/nouveau/nouveau_drm.h
@@ -113,7 +113,7 @@ struct nouveau_drm {
struct nvbios vbios;
struct nouveau_display *display;
struct backlight_device *backlight;
- struct nouveau_eventh vblank[4];
+ struct nouveau_eventh vblank;
/* power management */
struct nouveau_pm *pm;
diff --git a/trunk/drivers/gpu/drm/nouveau/nv50_display.c b/trunk/drivers/gpu/drm/nouveau/nv50_display.c
index 1ddc03e51bf4..7f0e6c3f37d1 100644
--- a/trunk/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/trunk/drivers/gpu/drm/nouveau/nv50_display.c
@@ -479,7 +479,7 @@ nv50_display_flip_wait(void *data)
{
struct nv50_display_flip *flip = data;
if (nouveau_bo_rd32(flip->disp->sync, flip->chan->addr / 4) ==
- flip->chan->data)
+ flip->chan->data);
return true;
usleep_range(1, 2);
return false;
diff --git a/trunk/drivers/gpu/drm/radeon/radeon_bios.c b/trunk/drivers/gpu/drm/radeon/radeon_bios.c
index fa3c56fba294..b8015913d382 100644
--- a/trunk/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/trunk/drivers/gpu/drm/radeon/radeon_bios.c
@@ -99,29 +99,6 @@ static bool radeon_read_bios(struct radeon_device *rdev)
return true;
}
-static bool radeon_read_platform_bios(struct radeon_device *rdev)
-{
- uint8_t __iomem *bios;
- size_t size;
-
- rdev->bios = NULL;
-
- bios = pci_platform_rom(rdev->pdev, &size);
- if (!bios) {
- return false;
- }
-
- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
- return false;
- }
- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
- if (rdev->bios == NULL) {
- return false;
- }
-
- return true;
-}
-
#ifdef CONFIG_ACPI
/* ATRM is used to get the BIOS on the discrete cards in
* dual-gpu systems.
@@ -643,9 +620,6 @@ bool radeon_get_bios(struct radeon_device *rdev)
if (r == false) {
r = radeon_read_disabled_bios(rdev);
}
- if (r == false) {
- r = radeon_read_platform_bios(rdev);
- }
if (r == false || rdev->bios == NULL) {
DRM_ERROR("Unable to locate a BIOS ROM\n");
rdev->bios = NULL;
diff --git a/trunk/drivers/gpu/drm/udl/udl_connector.c b/trunk/drivers/gpu/drm/udl/udl_connector.c
index b44d548c56f8..fe5cdbcf2636 100644
--- a/trunk/drivers/gpu/drm/udl/udl_connector.c
+++ b/trunk/drivers/gpu/drm/udl/udl_connector.c
@@ -61,10 +61,6 @@ static int udl_get_modes(struct drm_connector *connector)
int ret;
edid = (struct edid *)udl_get_edid(udl);
- if (!edid) {
- drm_mode_connector_update_edid_property(connector, NULL);
- return 0;
- }
/*
* We only read the main block, but if the monitor reports extension
diff --git a/trunk/drivers/hid/hid-core.c b/trunk/drivers/hid/hid-core.c
index aa341d135867..512b01c04ea7 100644
--- a/trunk/drivers/hid/hid-core.c
+++ b/trunk/drivers/hid/hid-core.c
@@ -2077,6 +2077,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HYBRID) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LD, USB_DEVICE_ID_LD_HEATCONTROL) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_BEATPAD) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MASTERKIT, USB_DEVICE_ID_MASTERKIT_MA901RADIO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1024LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MCC, USB_DEVICE_ID_MCC_PMD1208LS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROCHIP, USB_DEVICE_ID_PICKIT1) },
@@ -2243,18 +2244,6 @@ bool hid_ignore(struct hid_device *hdev)
hdev->product <= USB_DEVICE_ID_VELLEMAN_K8061_LAST))
return true;
break;
- case USB_VENDOR_ID_ATMEL_V_USB:
- /* Masterkit MA901 usb radio based on Atmel tiny85 chip and
- * it has the same USB ID as many Atmel V-USB devices. This
- * usb radio is handled by radio-ma901.c driver so we want
- * ignore the hid. Check the name, bus, product and ignore
- * if we have MA901 usb radio.
- */
- if (hdev->product == USB_DEVICE_ID_ATMEL_V_USB &&
- hdev->bus == BUS_USB &&
- strncmp(hdev->name, "www.masterkit.ru MA901", 22) == 0)
- return true;
- break;
}
if (hdev->type == HID_TYPE_USBMOUSE &&
diff --git a/trunk/drivers/hid/hid-ids.h b/trunk/drivers/hid/hid-ids.h
index 5309fd5eb0eb..c4388776f4e4 100644
--- a/trunk/drivers/hid/hid-ids.h
+++ b/trunk/drivers/hid/hid-ids.h
@@ -158,8 +158,6 @@
#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
#define USB_DEVICE_ID_ATMEL_MXT_DIGITIZER 0x2118
-#define USB_VENDOR_ID_ATMEL_V_USB 0x16c0
-#define USB_DEVICE_ID_ATMEL_V_USB 0x05df
#define USB_VENDOR_ID_AUREAL 0x0755
#define USB_DEVICE_ID_AUREAL_W01RN 0x2626
@@ -559,6 +557,9 @@
#define USB_VENDOR_ID_MADCATZ 0x0738
#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
+#define USB_VENDOR_ID_MASTERKIT 0x16c0
+#define USB_DEVICE_ID_MASTERKIT_MA901RADIO 0x05df
+
#define USB_VENDOR_ID_MCC 0x09db
#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
#define USB_DEVICE_ID_MCC_PMD1208LS 0x007a
diff --git a/trunk/drivers/hid/hid-magicmouse.c b/trunk/drivers/hid/hid-magicmouse.c
index a8ce44296cfd..f7f113ba083e 100644
--- a/trunk/drivers/hid/hid-magicmouse.c
+++ b/trunk/drivers/hid/hid-magicmouse.c
@@ -462,21 +462,6 @@ static int magicmouse_input_mapping(struct hid_device *hdev,
return 0;
}
-static void magicmouse_input_configured(struct hid_device *hdev,
- struct hid_input *hi)
-
-{
- struct magicmouse_sc *msc = hid_get_drvdata(hdev);
-
- int ret = magicmouse_setup_input(msc->input, hdev);
- if (ret) {
- hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
- /* clean msc->input to notify probe() of the failure */
- msc->input = NULL;
- }
-}
-
-
static int magicmouse_probe(struct hid_device *hdev,
const struct hid_device_id *id)
{
@@ -508,10 +493,15 @@ static int magicmouse_probe(struct hid_device *hdev,
goto err_free;
}
- if (!msc->input) {
- hid_err(hdev, "magicmouse input not registered\n");
- ret = -ENOMEM;
- goto err_stop_hw;
+ /* We do this after hid-input is done parsing reports so that
+ * hid-input uses the most natural button and axis IDs.
+ */
+ if (msc->input) {
+ ret = magicmouse_setup_input(msc->input, hdev);
+ if (ret) {
+ hid_err(hdev, "magicmouse setup input failed (%d)\n", ret);
+ goto err_stop_hw;
+ }
}
if (id->product == USB_DEVICE_ID_APPLE_MAGICMOUSE)
@@ -578,7 +568,6 @@ static struct hid_driver magicmouse_driver = {
.remove = magicmouse_remove,
.raw_event = magicmouse_raw_event,
.input_mapping = magicmouse_input_mapping,
- .input_configured = magicmouse_input_configured,
};
module_hid_driver(magicmouse_driver);
diff --git a/trunk/drivers/hwspinlock/hwspinlock_core.c b/trunk/drivers/hwspinlock/hwspinlock_core.c
index 461a0d739d75..db713c0dfba4 100644
--- a/trunk/drivers/hwspinlock/hwspinlock_core.c
+++ b/trunk/drivers/hwspinlock/hwspinlock_core.c
@@ -416,8 +416,6 @@ static int __hwspin_lock_request(struct hwspinlock *hwlock)
ret = pm_runtime_get_sync(dev);
if (ret < 0) {
dev_err(dev, "%s: can't power on device\n", __func__);
- pm_runtime_put_noidle(dev);
- module_put(dev->driver->owner);
return ret;
}
diff --git a/trunk/drivers/i2c/busses/i2c-designware-platdrv.c b/trunk/drivers/i2c/busses/i2c-designware-platdrv.c
index e3085c487ace..0ceb6e1b0f65 100644
--- a/trunk/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/trunk/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -182,6 +182,7 @@ static int dw_i2c_probe(struct platform_device *pdev)
adap->algo = &i2c_dw_algo;
adap->dev.parent = &pdev->dev;
adap->dev.of_node = pdev->dev.of_node;
+ ACPI_HANDLE_SET(&adap->dev, ACPI_HANDLE(&pdev->dev));
r = i2c_add_numbered_adapter(adap);
if (r) {
diff --git a/trunk/drivers/idle/intel_idle.c b/trunk/drivers/idle/intel_idle.c
index 1a38dd7dfe4e..5d6675013864 100644
--- a/trunk/drivers/idle/intel_idle.c
+++ b/trunk/drivers/idle/intel_idle.c
@@ -465,7 +465,6 @@ static const struct x86_cpu_id intel_idle_ids[] = {
ICPU(0x3c, idle_cpu_hsw),
ICPU(0x3f, idle_cpu_hsw),
ICPU(0x45, idle_cpu_hsw),
- ICPU(0x46, idle_cpu_hsw),
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_idle_ids);
diff --git a/trunk/drivers/infiniband/hw/qib/qib_sd7220.c b/trunk/drivers/infiniband/hw/qib/qib_sd7220.c
index 911205d3d5a0..08a6c6d39e56 100644
--- a/trunk/drivers/infiniband/hw/qib/qib_sd7220.c
+++ b/trunk/drivers/infiniband/hw/qib/qib_sd7220.c
@@ -44,7 +44,7 @@
#include "qib.h"
#include "qib_7220.h"
-#define SD7220_FW_NAME "qlogic/sd7220.fw"
+#define SD7220_FW_NAME "intel/sd7220.fw"
MODULE_FIRMWARE(SD7220_FW_NAME);
/*
diff --git a/trunk/drivers/input/tablet/wacom_wac.c b/trunk/drivers/input/tablet/wacom_wac.c
index 0bfd8cf25200..1daa97913b7d 100644
--- a/trunk/drivers/input/tablet/wacom_wac.c
+++ b/trunk/drivers/input/tablet/wacom_wac.c
@@ -359,7 +359,7 @@ static int wacom_intuos_inout(struct wacom_wac *wacom)
case 0x802: /* Intuos4 General Pen */
case 0x804: /* Intuos4 Marker Pen */
case 0x40802: /* Intuos4 Classic Pen */
- case 0x18802: /* DTH2242 Grip Pen */
+ case 0x18803: /* DTH2242 Grip Pen */
case 0x022:
wacom->tool[idx] = BTN_TOOL_PEN;
break;
@@ -1912,7 +1912,7 @@ static const struct wacom_features wacom_features_0xBB =
{ "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047,
63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0xBC =
- { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40640, 25400, 2047,
+ { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047,
63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
static const struct wacom_features wacom_features_0x26 =
{ "Wacom Intuos5 touch S", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047,
@@ -2144,7 +2144,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x44) },
{ USB_DEVICE_WACOM(0x45) },
{ USB_DEVICE_WACOM(0x59) },
- { USB_DEVICE_DETAILED(0x5D, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0x5D) },
{ USB_DEVICE_WACOM(0xB0) },
{ USB_DEVICE_WACOM(0xB1) },
{ USB_DEVICE_WACOM(0xB2) },
@@ -2209,7 +2209,7 @@ const struct usb_device_id wacom_ids[] = {
{ USB_DEVICE_WACOM(0x47) },
{ USB_DEVICE_WACOM(0xF4) },
{ USB_DEVICE_WACOM(0xF8) },
- { USB_DEVICE_DETAILED(0xF6, USB_CLASS_HID, 0, 0) },
+ { USB_DEVICE_WACOM(0xF6) },
{ USB_DEVICE_WACOM(0xFA) },
{ USB_DEVICE_LENOVO(0x6004) },
{ }
diff --git a/trunk/drivers/irqchip/irq-gic.c b/trunk/drivers/irqchip/irq-gic.c
index fc6aebf1e4b2..a32e0d5aa45f 100644
--- a/trunk/drivers/irqchip/irq-gic.c
+++ b/trunk/drivers/irqchip/irq-gic.c
@@ -236,8 +236,7 @@ static int gic_retrigger(struct irq_data *d)
if (gic_arch_extn.irq_retrigger)
return gic_arch_extn.irq_retrigger(d);
- /* the genirq layer expects 0 if we can't retrigger in hardware */
- return 0;
+ return -ENXIO;
}
#ifdef CONFIG_SMP
diff --git a/trunk/drivers/macintosh/smu.c b/trunk/drivers/macintosh/smu.c
index b3b2d36c009e..9c6b96414862 100644
--- a/trunk/drivers/macintosh/smu.c
+++ b/trunk/drivers/macintosh/smu.c
@@ -120,7 +120,11 @@ static void smu_start_cmd(void)
DPRINTK("SMU: starting cmd %x, %d bytes data\n", cmd->cmd,
cmd->data_len);
- DPRINTK("SMU: data buffer: %8ph\n", cmd->data_buf);
+ DPRINTK("SMU: data buffer: %02x %02x %02x %02x %02x %02x %02x %02x\n",
+ ((u8 *)cmd->data_buf)[0], ((u8 *)cmd->data_buf)[1],
+ ((u8 *)cmd->data_buf)[2], ((u8 *)cmd->data_buf)[3],
+ ((u8 *)cmd->data_buf)[4], ((u8 *)cmd->data_buf)[5],
+ ((u8 *)cmd->data_buf)[6], ((u8 *)cmd->data_buf)[7]);
/* Fill the SMU command buffer */
smu->cmd_buf->cmd = cmd->cmd;
diff --git a/trunk/drivers/macintosh/via-pmu.c b/trunk/drivers/macintosh/via-pmu.c
index c9c85426f4ff..22b8ce4191cc 100644
--- a/trunk/drivers/macintosh/via-pmu.c
+++ b/trunk/drivers/macintosh/via-pmu.c
@@ -750,9 +750,8 @@ done_battery_state_smart(struct adb_request* req)
voltage = (req->reply[8] << 8) | req->reply[9];
break;
default:
- pr_warn("pmu.c: unrecognized battery info, "
- "len: %d, %4ph\n", req->reply_len,
- req->reply);
+ printk(KERN_WARNING "pmu.c : unrecognized battery info, len: %d, %02x %02x %02x %02x\n",
+ req->reply_len, req->reply[0], req->reply[1], req->reply[2], req->reply[3]);
break;
}
}
diff --git a/trunk/drivers/md/dm-cache-target.c b/trunk/drivers/md/dm-cache-target.c
index 10744091e6ca..66120bd46d15 100644
--- a/trunk/drivers/md/dm-cache-target.c
+++ b/trunk/drivers/md/dm-cache-target.c
@@ -6,7 +6,6 @@
#include "dm.h"
#include "dm-bio-prison.h"
-#include "dm-bio-record.h"
#include "dm-cache-metadata.h"
#include <linux/dm-io.h>
@@ -202,15 +201,10 @@ struct per_bio_data {
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
- /*
- * writethrough fields. These MUST remain at the end of this
- * structure and the 'cache' member must be the first as it
- * is used to determine the offsetof the writethrough fields.
- */
+ /* writethrough fields */
struct cache *cache;
dm_cblock_t cblock;
bio_end_io_t *saved_bi_end_io;
- struct dm_bio_details bio_details;
};
struct dm_cache_migration {
@@ -519,28 +513,16 @@ static void save_stats(struct cache *cache)
/*----------------------------------------------------------------
* Per bio data
*--------------------------------------------------------------*/
-
-/*
- * If using writeback, leave out struct per_bio_data's writethrough fields.
- */
-#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
-#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))
-
-static size_t get_per_bio_data_size(struct cache *cache)
-{
- return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
-}
-
-static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
+static struct per_bio_data *get_per_bio_data(struct bio *bio)
{
- struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
+ struct per_bio_data *pb = dm_per_bio_data(bio, sizeof(struct per_bio_data));
BUG_ON(!pb);
return pb;
}
-static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
+static struct per_bio_data *init_per_bio_data(struct bio *bio)
{
- struct per_bio_data *pb = get_per_bio_data(bio, data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
pb->tick = false;
pb->req_nr = dm_bio_get_target_bio_nr(bio);
@@ -574,8 +556,7 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
unsigned long flags;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
spin_lock_irqsave(&cache->lock, flags);
if (cache->need_tick_bio &&
@@ -654,7 +635,7 @@ static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
static void writethrough_endio(struct bio *bio, int err)
{
- struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+ struct per_bio_data *pb = get_per_bio_data(bio);
bio->bi_end_io = pb->saved_bi_end_io;
if (err) {
@@ -662,7 +643,6 @@ static void writethrough_endio(struct bio *bio, int err)
return;
}
- dm_bio_restore(&pb->bio_details, bio);
remap_to_cache(pb->cache, bio, pb->cblock);
/*
@@ -682,12 +662,11 @@ static void writethrough_endio(struct bio *bio, int err)
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
dm_oblock_t oblock, dm_cblock_t cblock)
{
- struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);
+ struct per_bio_data *pb = get_per_bio_data(bio);
pb->cache = cache;
pb->cblock = cblock;
pb->saved_bi_end_io = bio->bi_end_io;
- dm_bio_record(&pb->bio_details, bio);
bio->bi_end_io = writethrough_endio;
remap_to_origin_clear_discard(pb->cache, bio, oblock);
@@ -1056,8 +1035,7 @@ static void defer_bio(struct cache *cache, struct bio *bio)
static void process_flush_bio(struct cache *cache, struct bio *bio)
{
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
BUG_ON(bio->bi_size);
if (!pb->req_nr)
@@ -1129,8 +1107,7 @@ static void process_bio(struct cache *cache, struct prealloc *structs,
dm_oblock_t block = get_bio_block(cache, bio);
struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
struct policy_result lookup_result;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
bool discarded_block = is_discarded_oblock(cache, block);
bool can_migrate = discarded_block || spare_migration_bandwidth(cache);
@@ -1904,6 +1881,7 @@ static int cache_create(struct cache_args *ca, struct cache **result)
cache->ti = ca->ti;
ti->private = cache;
+ ti->per_bio_data_size = sizeof(struct per_bio_data);
ti->num_flush_bios = 2;
ti->flush_supported = true;
@@ -1912,7 +1890,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
ti->discard_zeroes_data_unsupported = true;
memcpy(&cache->features, &ca->features, sizeof(cache->features));
- ti->per_bio_data_size = get_per_bio_data_size(cache);
cache->callbacks.congested_fn = cache_is_congested;
dm_table_add_target_callbacks(ti->table, &cache->callbacks);
@@ -2115,7 +2092,6 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
int r;
dm_oblock_t block = get_bio_block(cache, bio);
- size_t pb_data_size = get_per_bio_data_size(cache);
bool can_migrate = false;
bool discarded_block;
struct dm_bio_prison_cell *cell;
@@ -2132,7 +2108,7 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
return DM_MAPIO_REMAPPED;
}
- pb = init_per_bio_data(bio, pb_data_size);
+ pb = init_per_bio_data(bio);
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio);
@@ -2217,8 +2193,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
{
struct cache *cache = ti->private;
unsigned long flags;
- size_t pb_data_size = get_per_bio_data_size(cache);
- struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
+ struct per_bio_data *pb = get_per_bio_data(bio);
if (pb->tick) {
policy_tick(cache->policy);
diff --git a/trunk/drivers/md/dm.c b/trunk/drivers/md/dm.c
index 9a0bdad9ad8f..7e469260fe5e 100644
--- a/trunk/drivers/md/dm.c
+++ b/trunk/drivers/md/dm.c
@@ -611,7 +611,6 @@ static void dec_pending(struct dm_io *io, int error)
queue_io(md, bio);
} else {
/* done with normal IO or empty flush */
- trace_block_bio_complete(md->queue, bio, io_error);
bio_endio(bio, io_error);
}
}
diff --git a/trunk/drivers/md/raid5.c b/trunk/drivers/md/raid5.c
index f4e87bfc7567..24909eb13fec 100644
--- a/trunk/drivers/md/raid5.c
+++ b/trunk/drivers/md/raid5.c
@@ -184,8 +184,6 @@ static void return_io(struct bio *return_bi)
return_bi = bi->bi_next;
bi->bi_next = NULL;
bi->bi_size = 0;
- trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
- bi, 0);
bio_endio(bi, 0);
bi = return_bi;
}
@@ -3916,8 +3914,6 @@ static void raid5_align_endio(struct bio *bi, int error)
rdev_dec_pending(rdev, conf->mddev);
if (!error && uptodate) {
- trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev),
- raid_bi, 0);
bio_endio(raid_bi, 0);
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
@@ -4386,8 +4382,6 @@ static void make_request(struct mddev *mddev, struct bio * bi)
if ( rw == WRITE )
md_write_end(mddev);
- trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
- bi, 0);
bio_endio(bi, 0);
}
}
@@ -4764,11 +4758,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
handled++;
}
remaining = raid5_dec_bi_active_stripes(raid_bio);
- if (remaining == 0) {
- trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev),
- raid_bio, 0);
+ if (remaining == 0)
bio_endio(raid_bio, 0);
- }
if (atomic_dec_and_test(&conf->active_aligned_reads))
wake_up(&conf->wait_for_stripe);
return handled;
diff --git a/trunk/drivers/media/platform/Kconfig b/trunk/drivers/media/platform/Kconfig
index a0639e779973..05d7b6333461 100644
--- a/trunk/drivers/media/platform/Kconfig
+++ b/trunk/drivers/media/platform/Kconfig
@@ -204,7 +204,7 @@ config VIDEO_SAMSUNG_EXYNOS_GSC
config VIDEO_SH_VEU
tristate "SuperH VEU mem2mem video processing driver"
- depends on VIDEO_DEV && VIDEO_V4L2 && GENERIC_HARDIRQS
+ depends on VIDEO_DEV && VIDEO_V4L2
select VIDEOBUF2_DMA_CONTIG
select V4L2_MEM2MEM_DEV
help
diff --git a/trunk/drivers/media/radio/radio-ma901.c b/trunk/drivers/media/radio/radio-ma901.c
index 348dafc0318a..c61f590029ad 100644
--- a/trunk/drivers/media/radio/radio-ma901.c
+++ b/trunk/drivers/media/radio/radio-ma901.c
@@ -347,20 +347,9 @@ static void usb_ma901radio_release(struct v4l2_device *v4l2_dev)
static int usb_ma901radio_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
- struct usb_device *dev = interface_to_usbdev(intf);
struct ma901radio_device *radio;
int retval = 0;
- /* Masterkit MA901 usb radio has the same USB ID as many others
- * Atmel V-USB devices. Let's make additional checks to be sure
- * that this is our device.
- */
-
- if (dev->product && dev->manufacturer &&
- (strncmp(dev->product, "MA901", 5) != 0
- || strncmp(dev->manufacturer, "www.masterkit.ru", 16) != 0))
- return -ENODEV;
-
radio = kzalloc(sizeof(struct ma901radio_device), GFP_KERNEL);
if (!radio) {
dev_err(&intf->dev, "kzalloc for ma901radio_device failed\n");
diff --git a/trunk/drivers/misc/vmw_vmci/Kconfig b/trunk/drivers/misc/vmw_vmci/Kconfig
index ea98f7e9ccd1..39c2ecadb273 100644
--- a/trunk/drivers/misc/vmw_vmci/Kconfig
+++ b/trunk/drivers/misc/vmw_vmci/Kconfig
@@ -4,7 +4,7 @@
config VMWARE_VMCI
tristate "VMware VMCI Driver"
- depends on X86 && PCI && NET
+ depends on X86 && PCI
help
This is VMware's Virtual Machine Communication Interface. It enables
high-speed communication between host and guest in a virtual
diff --git a/trunk/drivers/mtd/mtdchar.c b/trunk/drivers/mtd/mtdchar.c
index dc571ebc1aa0..92ab30ab00dc 100644
--- a/trunk/drivers/mtd/mtdchar.c
+++ b/trunk/drivers/mtd/mtdchar.c
@@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
}
#endif
+static inline unsigned long get_vm_size(struct vm_area_struct *vma)
+{
+ return vma->vm_end - vma->vm_start;
+}
+
+static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
+{
+ return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
+}
+
+/*
+ * Set a new vm offset.
+ *
+ * Verify that the incoming offset really works as a page offset,
+ * and that the offset and size fit in a resource_size_t.
+ */
+static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
+{
+ pgoff_t pgoff = off >> PAGE_SHIFT;
+ if (off != (resource_size_t) pgoff << PAGE_SHIFT)
+ return -EINVAL;
+ if (off + get_vm_size(vma) - 1 < off)
+ return -EINVAL;
+ vma->vm_pgoff = pgoff;
+ return 0;
+}
+
/*
* set up a mapping for shared memory segments
*/
@@ -1132,17 +1159,45 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
struct mtd_file_info *mfi = file->private_data;
struct mtd_info *mtd = mfi->mtd;
struct map_info *map = mtd->priv;
+ resource_size_t start, off;
+ unsigned long len, vma_len;
/* This is broken because it assumes the MTD device is map-based
and that mtd->priv is a valid struct map_info. It should be
replaced with something that uses the mtd_get_unmapped_area()
operation properly. */
if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) {
+ off = get_vm_offset(vma);
+ start = map->phys;
+ len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
+ start &= PAGE_MASK;
+ vma_len = get_vm_size(vma);
+
+ /* Overflow in off+len? */
+ if (vma_len + off < off)
+ return -EINVAL;
+ /* Does it fit in the mapping? */
+ if (vma_len + off > len)
+ return -EINVAL;
+
+ off += start;
+ /* Did that overflow? */
+ if (off < start)
+ return -EINVAL;
+ if (set_vm_offset(vma, off) < 0)
+ return -EINVAL;
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
+
#ifdef pgprot_noncached
- if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory))
+ if (file->f_flags & O_DSYNC || off >= __pa(high_memory))
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
- return vm_iomap_memory(vma, map->phys, map->size);
+ if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT,
+ vma->vm_end - vma->vm_start,
+ vma->vm_page_prot))
+ return -EAGAIN;
+
+ return 0;
}
return -ENOSYS;
#else
diff --git a/trunk/drivers/net/bonding/bond_main.c b/trunk/drivers/net/bonding/bond_main.c
index dbbea0eec134..6bbd90e1123c 100644
--- a/trunk/drivers/net/bonding/bond_main.c
+++ b/trunk/drivers/net/bonding/bond_main.c
@@ -846,10 +846,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);
- netif_addr_lock_bh(bond->dev);
netdev_for_each_mc_addr(ha, bond->dev)
dev_mc_del(old_active->dev, ha->addr);
- netif_addr_unlock_bh(bond->dev);
}
if (new_active) {
@@ -860,10 +858,8 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);
- netif_addr_lock_bh(bond->dev);
netdev_for_each_mc_addr(ha, bond->dev)
dev_mc_add(new_active->dev, ha->addr);
- netif_addr_unlock_bh(bond->dev);
}
}
@@ -1905,29 +1901,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_destroy_slave_symlinks(bond_dev, slave_dev);
err_detach:
- if (!USES_PRIMARY(bond->params.mode)) {
- netif_addr_lock_bh(bond_dev);
- bond_mc_list_flush(bond_dev, slave_dev);
- netif_addr_unlock_bh(bond_dev);
- }
- bond_del_vlans_from_slave(bond, slave_dev);
write_lock_bh(&bond->lock);
bond_detach_slave(bond, new_slave);
- if (bond->primary_slave == new_slave)
- bond->primary_slave = NULL;
write_unlock_bh(&bond->lock);
- if (bond->curr_active_slave == new_slave) {
- read_lock(&bond->lock);
- write_lock_bh(&bond->curr_slave_lock);
- bond_change_active_slave(bond, NULL);
- bond_select_active_slave(bond);
- write_unlock_bh(&bond->curr_slave_lock);
- read_unlock(&bond->lock);
- }
- slave_disable_netpoll(new_slave);
err_close:
- slave_dev->priv_flags &= ~IFF_BONDING;
dev_close(slave_dev);
err_unset_master:
@@ -1998,11 +1976,12 @@ static int __bond_release_one(struct net_device *bond_dev,
return -EINVAL;
}
- write_unlock_bh(&bond->lock);
/* unregister rx_handler early so bond_handle_frame wouldn't be called
* for this slave anymore.
*/
netdev_rx_handler_unregister(slave_dev);
+ write_unlock_bh(&bond->lock);
+ synchronize_net();
write_lock_bh(&bond->lock);
if (!all && !bond->params.fail_over_mac) {
@@ -3190,20 +3169,11 @@ static int bond_slave_netdev_event(unsigned long event,
struct net_device *slave_dev)
{
struct slave *slave = bond_slave_get_rtnl(slave_dev);
- struct bonding *bond;
- struct net_device *bond_dev;
+ struct bonding *bond = slave->bond;
+ struct net_device *bond_dev = slave->bond->dev;
u32 old_speed;
u8 old_duplex;
- /* A netdev event can be generated while enslaving a device
- * before netdev_rx_handler_register is called in which case
- * slave will be NULL
- */
- if (!slave)
- return NOTIFY_DONE;
- bond_dev = slave->bond->dev;
- bond = slave->bond;
-
switch (event) {
case NETDEV_UNREGISTER:
if (bond->setup_by_slave)
@@ -3317,22 +3287,20 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
*/
static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
{
- const struct ethhdr *data;
- const struct iphdr *iph;
- const struct ipv6hdr *ipv6h;
+ struct ethhdr *data = (struct ethhdr *)skb->data;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
u32 v6hash;
- const __be32 *s, *d;
+ __be32 *s, *d;
if (skb->protocol == htons(ETH_P_IP) &&
- pskb_network_may_pull(skb, sizeof(*iph))) {
+ skb_network_header_len(skb) >= sizeof(*iph)) {
iph = ip_hdr(skb);
- data = (struct ethhdr *)skb->data;
return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
(data->h_dest[5] ^ data->h_source[5])) % count;
} else if (skb->protocol == htons(ETH_P_IPV6) &&
- pskb_network_may_pull(skb, sizeof(*ipv6h))) {
+ skb_network_header_len(skb) >= sizeof(*ipv6h)) {
ipv6h = ipv6_hdr(skb);
- data = (struct ethhdr *)skb->data;
s = &ipv6h->saddr.s6_addr32[0];
d = &ipv6h->daddr.s6_addr32[0];
v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
@@ -3351,36 +3319,33 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
{
u32 layer4_xor = 0;
- const struct iphdr *iph;
- const struct ipv6hdr *ipv6h;
- const __be32 *s, *d;
- const __be16 *l4 = NULL;
- __be16 _l4[2];
- int noff = skb_network_offset(skb);
- int poff;
+ struct iphdr *iph;
+ struct ipv6hdr *ipv6h;
+ __be32 *s, *d;
+ __be16 *layer4hdr;
if (skb->protocol == htons(ETH_P_IP) &&
- pskb_may_pull(skb, noff + sizeof(*iph))) {
+ skb_network_header_len(skb) >= sizeof(*iph)) {
iph = ip_hdr(skb);
- poff = proto_ports_offset(iph->protocol);
-
- if (!ip_is_fragment(iph) && poff >= 0) {
- l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
- sizeof(_l4), &_l4);
- if (l4)
- layer4_xor = ntohs(l4[0] ^ l4[1]);
+ if (!ip_is_fragment(iph) &&
+ (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP) &&
+ (skb_headlen(skb) - skb_network_offset(skb) >=
+ iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
+ layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
+ layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
}
return (layer4_xor ^
((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
} else if (skb->protocol == htons(ETH_P_IPV6) &&
- pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
+ skb_network_header_len(skb) >= sizeof(*ipv6h)) {
ipv6h = ipv6_hdr(skb);
- poff = proto_ports_offset(ipv6h->nexthdr);
- if (poff >= 0) {
- l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
- sizeof(_l4), &_l4);
- if (l4)
- layer4_xor = ntohs(l4[0] ^ l4[1]);
+ if ((ipv6h->nexthdr == IPPROTO_TCP ||
+ ipv6h->nexthdr == IPPROTO_UDP) &&
+ (skb_headlen(skb) - skb_network_offset(skb) >=
+ sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
+ layer4hdr = (__be16 *)(ipv6h + 1);
+ layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
}
s = &ipv6h->saddr.s6_addr32[0];
d = &ipv6h->daddr.s6_addr32[0];
@@ -4882,18 +4847,9 @@ static int __net_init bond_net_init(struct net *net)
static void __net_exit bond_net_exit(struct net *net)
{
struct bond_net *bn = net_generic(net, bond_net_id);
- struct bonding *bond, *tmp_bond;
- LIST_HEAD(list);
bond_destroy_sysfs(bn);
bond_destroy_proc_dir(bn);
-
- /* Kill off any bonds created after unregistering bond rtnl ops */
- rtnl_lock();
- list_for_each_entry_safe(bond, tmp_bond, &bn->dev_list, bond_list)
- unregister_netdevice_queue(bond->dev, &list);
- unregister_netdevice_many(&list);
- rtnl_unlock();
}
static struct pernet_operations bond_net_ops = {
diff --git a/trunk/drivers/net/bonding/bond_sysfs.c b/trunk/drivers/net/bonding/bond_sysfs.c
index ea7a388f4843..db103e03ba05 100644
--- a/trunk/drivers/net/bonding/bond_sysfs.c
+++ b/trunk/drivers/net/bonding/bond_sysfs.c
@@ -527,7 +527,7 @@ static ssize_t bonding_store_arp_interval(struct device *d,
goto out;
}
if (new_value < 0) {
- pr_err("%s: Invalid arp_interval value %d not in range 0-%d; rejected.\n",
+ pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n",
bond->dev->name, new_value, INT_MAX);
ret = -EINVAL;
goto out;
@@ -542,15 +542,14 @@ static ssize_t bonding_store_arp_interval(struct device *d,
pr_info("%s: Setting ARP monitoring interval to %d.\n",
bond->dev->name, new_value);
bond->params.arp_interval = new_value;
- if (new_value) {
- if (bond->params.miimon) {
- pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
- bond->dev->name, bond->dev->name);
- bond->params.miimon = 0;
- }
- if (!bond->params.arp_targets[0])
- pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
- bond->dev->name);
+ if (bond->params.miimon) {
+ pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n",
+ bond->dev->name, bond->dev->name);
+ bond->params.miimon = 0;
+ }
+ if (!bond->params.arp_targets[0]) {
+ pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n",
+ bond->dev->name);
}
if (bond->dev->flags & IFF_UP) {
/* If the interface is up, we may need to fire off
@@ -558,13 +557,10 @@ static ssize_t bonding_store_arp_interval(struct device *d,
* timer will get fired off when the open function
* is called.
*/
- if (!new_value) {
- cancel_delayed_work_sync(&bond->arp_work);
- } else {
- cancel_delayed_work_sync(&bond->mii_work);
- queue_delayed_work(bond->wq, &bond->arp_work, 0);
- }
+ cancel_delayed_work_sync(&bond->mii_work);
+ queue_delayed_work(bond->wq, &bond->arp_work, 0);
}
+
out:
rtnl_unlock();
return ret;
@@ -706,7 +702,7 @@ static ssize_t bonding_store_downdelay(struct device *d,
}
if (new_value < 0) {
pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
+ bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
@@ -761,8 +757,8 @@ static ssize_t bonding_store_updelay(struct device *d,
goto out;
}
if (new_value < 0) {
- pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
+ pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n",
+ bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
} else {
@@ -972,37 +968,37 @@ static ssize_t bonding_store_miimon(struct device *d,
}
if (new_value < 0) {
pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n",
- bond->dev->name, new_value, 0, INT_MAX);
+ bond->dev->name, new_value, 1, INT_MAX);
ret = -EINVAL;
goto out;
- }
- pr_info("%s: Setting MII monitoring interval to %d.\n",
- bond->dev->name, new_value);
- bond->params.miimon = new_value;
- if (bond->params.updelay)
- pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.updelay * bond->params.miimon);
- if (bond->params.downdelay)
- pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
- bond->dev->name,
- bond->params.downdelay * bond->params.miimon);
- if (new_value && bond->params.arp_interval) {
- pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
- bond->dev->name);
- bond->params.arp_interval = 0;
- if (bond->params.arp_validate)
- bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
- }
- if (bond->dev->flags & IFF_UP) {
- /* If the interface is up, we may need to fire off
- * the MII timer. If the interface is down, the
- * timer will get fired off when the open function
- * is called.
- */
- if (!new_value) {
- cancel_delayed_work_sync(&bond->mii_work);
- } else {
+ } else {
+ pr_info("%s: Setting MII monitoring interval to %d.\n",
+ bond->dev->name, new_value);
+ bond->params.miimon = new_value;
+ if (bond->params.updelay)
+ pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.updelay * bond->params.miimon);
+ if (bond->params.downdelay)
+ pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n",
+ bond->dev->name,
+ bond->params.downdelay * bond->params.miimon);
+ if (bond->params.arp_interval) {
+ pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n",
+ bond->dev->name);
+ bond->params.arp_interval = 0;
+ if (bond->params.arp_validate) {
+ bond->params.arp_validate =
+ BOND_ARP_VALIDATE_NONE;
+ }
+ }
+
+ if (bond->dev->flags & IFF_UP) {
+ /* If the interface is up, we may need to fire off
+ * the MII timer. If the interface is down, the
+ * timer will get fired off when the open function
+ * is called.
+ */
cancel_delayed_work_sync(&bond->arp_work);
queue_delayed_work(bond->wq, &bond->mii_work, 0);
}
diff --git a/trunk/drivers/net/can/mcp251x.c b/trunk/drivers/net/can/mcp251x.c
index 9aa0c64c33c8..f32b9fc6a983 100644
--- a/trunk/drivers/net/can/mcp251x.c
+++ b/trunk/drivers/net/can/mcp251x.c
@@ -929,7 +929,6 @@ static int mcp251x_open(struct net_device *net)
struct mcp251x_priv *priv = netdev_priv(net);
struct spi_device *spi = priv->spi;
struct mcp251x_platform_data *pdata = spi->dev.platform_data;
- unsigned long flags;
int ret;
ret = open_candev(net);
@@ -946,14 +945,9 @@ static int mcp251x_open(struct net_device *net)
priv->tx_skb = NULL;
priv->tx_len = 0;
- flags = IRQF_ONESHOT;
- if (pdata->irq_flags)
- flags |= pdata->irq_flags;
- else
- flags |= IRQF_TRIGGER_FALLING;
-
ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist,
- flags, DEVICE_NAME, priv);
+ pdata->irq_flags ? pdata->irq_flags : IRQF_TRIGGER_FALLING,
+ DEVICE_NAME, priv);
if (ret) {
dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
if (pdata->transceiver_enable)
diff --git a/trunk/drivers/net/can/sja1000/Kconfig b/trunk/drivers/net/can/sja1000/Kconfig
index ff2ba86cd4a4..b39ca5b3ea7f 100644
--- a/trunk/drivers/net/can/sja1000/Kconfig
+++ b/trunk/drivers/net/can/sja1000/Kconfig
@@ -46,7 +46,6 @@ config CAN_EMS_PCI
config CAN_PEAK_PCMCIA
tristate "PEAK PCAN-PC Card"
depends on PCMCIA
- depends on HAS_IOPORT
---help---
This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
from PEAK-System (http://www.peak-system.com). To compile this
diff --git a/trunk/drivers/net/can/sja1000/plx_pci.c b/trunk/drivers/net/can/sja1000/plx_pci.c
index 3c18d7d000ed..a042cdc260dc 100644
--- a/trunk/drivers/net/can/sja1000/plx_pci.c
+++ b/trunk/drivers/net/can/sja1000/plx_pci.c
@@ -348,7 +348,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
*/
if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
REG_CR_BASICCAN_INITIAL &&
- (priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_BASICCAN_INITIAL) &&
+ (priv->read_reg(priv, REG_SR) == REG_SR_BASICCAN_INITIAL) &&
(priv->read_reg(priv, REG_IR) == REG_IR_BASICCAN_INITIAL))
flag = 1;
@@ -360,7 +360,7 @@ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv)
* See states on p. 23 of the Datasheet.
*/
if (priv->read_reg(priv, REG_MOD) == REG_MOD_PELICAN_INITIAL &&
- priv->read_reg(priv, SJA1000_REG_SR) == REG_SR_PELICAN_INITIAL &&
+ priv->read_reg(priv, REG_SR) == REG_SR_PELICAN_INITIAL &&
priv->read_reg(priv, REG_IR) == REG_IR_PELICAN_INITIAL)
return flag;
diff --git a/trunk/drivers/net/can/sja1000/sja1000.c b/trunk/drivers/net/can/sja1000/sja1000.c
index e4df307eaa90..daf4013a8fc7 100644
--- a/trunk/drivers/net/can/sja1000/sja1000.c
+++ b/trunk/drivers/net/can/sja1000/sja1000.c
@@ -92,7 +92,7 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val)
*/
spin_lock_irqsave(&priv->cmdreg_lock, flags);
priv->write_reg(priv, REG_CMR, val);
- priv->read_reg(priv, SJA1000_REG_SR);
+ priv->read_reg(priv, REG_SR);
spin_unlock_irqrestore(&priv->cmdreg_lock, flags);
}
@@ -502,7 +502,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) {
n++;
- status = priv->read_reg(priv, SJA1000_REG_SR);
+ status = priv->read_reg(priv, REG_SR);
/* check for absent controller due to hw unplug */
if (status == 0xFF && sja1000_is_absent(priv))
return IRQ_NONE;
@@ -530,7 +530,7 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id)
/* receive interrupt */
while (status & SR_RBS) {
sja1000_rx(dev);
- status = priv->read_reg(priv, SJA1000_REG_SR);
+ status = priv->read_reg(priv, REG_SR);
/* check for absent controller */
if (status == 0xFF && sja1000_is_absent(priv))
return IRQ_NONE;
diff --git a/trunk/drivers/net/can/sja1000/sja1000.h b/trunk/drivers/net/can/sja1000/sja1000.h
index aa48e053da27..afa99847a510 100644
--- a/trunk/drivers/net/can/sja1000/sja1000.h
+++ b/trunk/drivers/net/can/sja1000/sja1000.h
@@ -56,7 +56,7 @@
/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
#define REG_MOD 0x00
#define REG_CMR 0x01
-#define SJA1000_REG_SR 0x02
+#define REG_SR 0x02
#define REG_IR 0x03
#define REG_IER 0x04
#define REG_ALC 0x0B
diff --git a/trunk/drivers/net/can/sja1000/sja1000_of_platform.c b/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
index 8e0c4a001939..6433b81256cd 100644
--- a/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/trunk/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -96,8 +96,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
struct net_device *dev;
struct sja1000_priv *priv;
struct resource res;
- u32 prop;
- int err, irq, res_size;
+ const u32 *prop;
+ int err, irq, res_size, prop_size;
void __iomem *base;
err = of_address_to_resource(np, 0, &res);
@@ -138,27 +138,27 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
priv->read_reg = sja1000_ofp_read_reg;
priv->write_reg = sja1000_ofp_write_reg;
- err = of_property_read_u32(np, "nxp,external-clock-frequency", &prop);
- if (!err)
- priv->can.clock.freq = prop / 2;
+ prop = of_get_property(np, "nxp,external-clock-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->can.clock.freq = *prop / 2;
else
priv->can.clock.freq = SJA1000_OFP_CAN_CLOCK; /* default */
- err = of_property_read_u32(np, "nxp,tx-output-mode", &prop);
- if (!err)
- priv->ocr |= prop & OCR_MODE_MASK;
+ prop = of_get_property(np, "nxp,tx-output-mode", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->ocr |= *prop & OCR_MODE_MASK;
else
priv->ocr |= OCR_MODE_NORMAL; /* default */
- err = of_property_read_u32(np, "nxp,tx-output-config", &prop);
- if (!err)
- priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK;
+ prop = of_get_property(np, "nxp,tx-output-config", &prop_size);
+ if (prop && (prop_size == sizeof(u32)))
+ priv->ocr |= (*prop << OCR_TX_SHIFT) & OCR_TX_MASK;
else
priv->ocr |= OCR_TX0_PULLDOWN; /* default */
- err = of_property_read_u32(np, "nxp,clock-out-frequency", &prop);
- if (!err && prop) {
- u32 divider = priv->can.clock.freq * 2 / prop;
+ prop = of_get_property(np, "nxp,clock-out-frequency", &prop_size);
+ if (prop && (prop_size == sizeof(u32)) && *prop) {
+ u32 divider = priv->can.clock.freq * 2 / *prop;
if (divider > 1)
priv->cdr |= divider / 2 - 1;
@@ -168,7 +168,8 @@ static int sja1000_ofp_probe(struct platform_device *ofdev)
priv->cdr |= CDR_CLK_OFF; /* default */
}
- if (!of_property_read_bool(np, "nxp,no-comparator-bypass"))
+ prop = of_get_property(np, "nxp,no-comparator-bypass", NULL);
+ if (!prop)
priv->cdr |= CDR_CBP; /* default */
priv->irq_flags = IRQF_SHARED;
diff --git a/trunk/drivers/net/ethernet/8390/ax88796.c b/trunk/drivers/net/ethernet/8390/ax88796.c
index e1d26433d619..cab306a9888e 100644
--- a/trunk/drivers/net/ethernet/8390/ax88796.c
+++ b/trunk/drivers/net/ethernet/8390/ax88796.c
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
struct ei_device *ei_local;
struct ax_device *ax;
struct resource *irq, *mem, *mem2;
- unsigned long mem_size, mem2_size = 0;
+ resource_size_t mem_size, mem2_size = 0;
int ret = 0;
dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
diff --git a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e.h b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e.h
index b5fd934585e9..829b5ad71d0d 100644
--- a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e.h
+++ b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e.h
@@ -186,7 +186,7 @@ struct atl1e_tpd_desc {
/* how about 0x2000 */
#define MAX_TX_BUF_LEN 0x2000
#define MAX_TX_BUF_SHIFT 13
-#define MAX_TSO_SEG_SIZE 0x3c00
+/*#define MAX_TX_BUF_LEN 0x3000 */
/* rrs word 1 bit 0:31 */
#define RRS_RX_CSUM_MASK 0xFFFF
@@ -438,6 +438,7 @@ struct atl1e_adapter {
struct atl1e_hw hw;
struct atl1e_hw_stats hw_stats;
+ bool have_msi;
u32 wol;
u16 link_speed;
u16 link_duplex;
diff --git a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index ac25f05ff68f..92f4734f860d 100644
--- a/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/trunk/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1849,19 +1849,34 @@ static void atl1e_free_irq(struct atl1e_adapter *adapter)
struct net_device *netdev = adapter->netdev;
free_irq(adapter->pdev->irq, netdev);
+
+ if (adapter->have_msi)
+ pci_disable_msi(adapter->pdev);
}
static int atl1e_request_irq(struct atl1e_adapter *adapter)
{
struct pci_dev *pdev = adapter->pdev;
struct net_device *netdev = adapter->netdev;
+ int flags = 0;
int err = 0;
- err = request_irq(pdev->irq, atl1e_intr, IRQF_SHARED, netdev->name,
- netdev);
+ adapter->have_msi = true;
+ err = pci_enable_msi(pdev);
+ if (err) {
+ netdev_dbg(netdev,
+ "Unable to allocate MSI interrupt Error: %d\n", err);
+ adapter->have_msi = false;
+ }
+
+ if (!adapter->have_msi)
+ flags |= IRQF_SHARED;
+ err = request_irq(pdev->irq, atl1e_intr, flags, netdev->name, netdev);
if (err) {
netdev_dbg(adapter->netdev,
"Unable to allocate interrupt Error: %d\n", err);
+ if (adapter->have_msi)
+ pci_disable_msi(pdev);
return err;
}
netdev_dbg(netdev, "atl1e_request_irq OK\n");
@@ -2329,7 +2344,6 @@ static int atl1e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_WORK(&adapter->reset_task, atl1e_reset_task);
INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task);
- netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE);
err = register_netdev(netdev);
if (err) {
netdev_err(netdev, "register netdevice failed\n");
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 57619dd4a92b..4046f97378c2 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2614,9 +2614,6 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}
- /* initialize FW coalescing state machines in RAM */
- bnx2x_update_coalesce(bp);
-
/* setup the leading queue */
rc = bnx2x_setup_leading(bp);
if (rc) {
@@ -4583,11 +4580,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
- u8 flags = REG_RD8(bp, addr);
+ u16 flags = REG_RD16(bp, addr);
/* clear and set */
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= enable_flag;
- REG_WR8(bp, addr, flags);
+ REG_WR16(bp, addr, flags);
DP(NETIF_MSG_IFUP,
"port %x fw_sb_id %d sb_index %d disable %d\n",
port, fw_sb_id, sb_index, disable);
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 0283f343b0d1..77ebae0ac64a 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -13437,7 +13437,13 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
{
struct bnx2x *bp = params->bp;
u16 base_page, next_page, not_kr2_device, lane;
- int sigdet;
+ int sigdet = bnx2x_warpcore_get_sigdet(phy, params);
+
+ if (!sigdet) {
+ if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE))
+ bnx2x_kr2_recovery(params, vars, phy);
+ return;
+ }
/* Once KR2 was disabled, wait 5 seconds before checking KR2 recovery
* since some switches tend to reinit the AN process and clear the
@@ -13448,16 +13454,6 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
vars->check_kr2_recovery_cnt--;
return;
}
-
- sigdet = bnx2x_warpcore_get_sigdet(phy, params);
- if (!sigdet) {
- if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
- bnx2x_kr2_recovery(params, vars, phy);
- DP(NETIF_MSG_LINK, "No sigdet\n");
- }
- return;
- }
-
lane = bnx2x_get_warpcore_lane(phy, params);
CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
MDIO_AER_BLOCK_AER_REG, lane);
diff --git a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c50696b396f1..e81a747ea8ce 100644
--- a/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/trunk/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -4947,7 +4947,7 @@ static void bnx2x_after_function_update(struct bnx2x *bp)
q);
}
- if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
+ if (!NO_FCOE(bp)) {
fp = &bp->fp[FCOE_IDX(bp)];
queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
@@ -9878,10 +9878,6 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
}
- if (!CHIP_IS_E1x(bp))
- /* block FW from writing to host */
- REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
-
/* wait until BRB is empty */
tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
while (timer_count) {
@@ -13358,7 +13354,6 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
RCU_INIT_POINTER(bp->cnic_ops, NULL);
mutex_unlock(&bp->cnic_mutex);
synchronize_rcu();
- bp->cnic_enabled = false;
kfree(bp->cnic_kwq);
bp->cnic_kwq = NULL;
diff --git a/trunk/drivers/net/ethernet/broadcom/tg3.c b/trunk/drivers/net/ethernet/broadcom/tg3.c
index 17a972734ba7..67d2663b3974 100644
--- a/trunk/drivers/net/ethernet/broadcom/tg3.c
+++ b/trunk/drivers/net/ethernet/broadcom/tg3.c
@@ -14604,11 +14604,8 @@ static void tg3_read_vpd(struct tg3 *tp)
if (j + len > block_end)
goto partno;
- if (len >= sizeof(tp->fw_ver))
- len = sizeof(tp->fw_ver) - 1;
- memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
- snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
- &vpd_data[j]);
+ memcpy(tp->fw_ver, &vpd_data[j], len);
+ strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
}
partno:
diff --git a/trunk/drivers/net/ethernet/calxeda/xgmac.c b/trunk/drivers/net/ethernet/calxeda/xgmac.c
index b0ebc9f6d55e..a170065b5973 100644
--- a/trunk/drivers/net/ethernet/calxeda/xgmac.c
+++ b/trunk/drivers/net/ethernet/calxeda/xgmac.c
@@ -163,7 +163,6 @@
#define XGMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ... */
/* XGMAC_INT_STAT reg */
-#define XGMAC_INT_STAT_PMTIM 0x00800000 /* PMT Interrupt Mask */
#define XGMAC_INT_STAT_PMT 0x0080 /* PMT Interrupt Status */
#define XGMAC_INT_STAT_LPI 0x0040 /* LPI Interrupt Status */
@@ -961,9 +960,6 @@ static int xgmac_hw_init(struct net_device *dev)
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_STATUS);
writel(DMA_INTR_DEFAULT_MASK, ioaddr + XGMAC_DMA_INTR_ENA);
- /* Mask power mgt interrupt */
- writel(XGMAC_INT_STAT_PMTIM, ioaddr + XGMAC_INT_STAT);
-
/* XGMAC requires AXI bus init. This is a 'magic number' for now */
writel(0x0077000E, ioaddr + XGMAC_DMA_AXI_BUS);
@@ -1145,9 +1141,6 @@ static int xgmac_rx(struct xgmac_priv *priv, int limit)
struct sk_buff *skb;
int frame_len;
- if (!dma_ring_cnt(priv->rx_head, priv->rx_tail, DMA_RX_RING_SZ))
- break;
-
entry = priv->rx_tail;
p = priv->dma_rx + entry;
if (desc_get_owner(p))
@@ -1832,7 +1825,7 @@ static void xgmac_pmt(void __iomem *ioaddr, unsigned long mode)
unsigned int pmt = 0;
if (mode & WAKE_MAGIC)
- pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT_EN;
+ pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_MAGIC_PKT;
if (mode & WAKE_UCAST)
pmt |= XGMAC_PMT_POWERDOWN | XGMAC_PMT_GLBL_UNICAST;
diff --git a/trunk/drivers/net/ethernet/davicom/dm9000.c b/trunk/drivers/net/ethernet/davicom/dm9000.c
index 9eada8e86078..8cdf02503d13 100644
--- a/trunk/drivers/net/ethernet/davicom/dm9000.c
+++ b/trunk/drivers/net/ethernet/davicom/dm9000.c
@@ -257,107 +257,6 @@ static void dm9000_dumpblk_32bit(void __iomem *reg, int count)
tmp = readl(reg);
}
-/*
- * Sleep, either by using msleep() or if we are suspending, then
- * use mdelay() to sleep.
- */
-static void dm9000_msleep(board_info_t *db, unsigned int ms)
-{
- if (db->in_suspend)
- mdelay(ms);
- else
- msleep(ms);
-}
-
-/* Read a word from phyxcer */
-static int
-dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
-{
- board_info_t *db = netdev_priv(dev);
- unsigned long flags;
- unsigned int reg_save;
- int ret;
-
- mutex_lock(&db->addr_lock);
-
- spin_lock_irqsave(&db->lock, flags);
-
- /* Save previous register address */
- reg_save = readb(db->io_addr);
-
- /* Fill the phyxcer register into REG_0C */
- iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
- /* Issue phyxcer read command */
- iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS);
-
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock, flags);
-
- dm9000_msleep(db, 1); /* Wait read complete */
-
- spin_lock_irqsave(&db->lock, flags);
- reg_save = readb(db->io_addr);
-
- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
-
- /* The read data keeps on REG_0D & REG_0E */
- ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
-
- /* restore the previous address */
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock, flags);
-
- mutex_unlock(&db->addr_lock);
-
- dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
- return ret;
-}
-
-/* Write a word to phyxcer */
-static void
-dm9000_phy_write(struct net_device *dev,
- int phyaddr_unused, int reg, int value)
-{
- board_info_t *db = netdev_priv(dev);
- unsigned long flags;
- unsigned long reg_save;
-
- dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
- mutex_lock(&db->addr_lock);
-
- spin_lock_irqsave(&db->lock, flags);
-
- /* Save previous register address */
- reg_save = readb(db->io_addr);
-
- /* Fill the phyxcer register into REG_0C */
- iow(db, DM9000_EPAR, DM9000_PHY | reg);
-
- /* Fill the written data into REG_0D & REG_0E */
- iow(db, DM9000_EPDRL, value);
- iow(db, DM9000_EPDRH, value >> 8);
-
- /* Issue phyxcer write command */
- iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW);
-
- writeb(reg_save, db->io_addr);
- spin_unlock_irqrestore(&db->lock, flags);
-
- dm9000_msleep(db, 1); /* Wait write complete */
-
- spin_lock_irqsave(&db->lock, flags);
- reg_save = readb(db->io_addr);
-
- iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
-
- /* restore the previous address */
- writeb(reg_save, db->io_addr);
-
- spin_unlock_irqrestore(&db->lock, flags);
- mutex_unlock(&db->addr_lock);
-}
-
/* dm9000_set_io
*
* select the specified set of io routines to use with the
@@ -896,9 +795,6 @@ dm9000_init_dm9000(struct net_device *dev)
iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
- dm9000_phy_write(dev, 0, MII_BMCR, BMCR_RESET); /* PHY RESET */
- dm9000_phy_write(dev, 0, MII_DM_DSPCR, DSPCR_INIT_PARAM); /* Init */
-
ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
/* if wol is needed, then always set NCR_WAKEEN otherwise we end
@@ -1305,6 +1201,109 @@ dm9000_open(struct net_device *dev)
return 0;
}
+/*
+ * Sleep, either by using msleep() or if we are suspending, then
+ * use mdelay() to sleep.
+ */
+static void dm9000_msleep(board_info_t *db, unsigned int ms)
+{
+ if (db->in_suspend)
+ mdelay(ms);
+ else
+ msleep(ms);
+}
+
+/*
+ * Read a word from phyxcer
+ */
+static int
+dm9000_phy_read(struct net_device *dev, int phy_reg_unused, int reg)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned int reg_save;
+ int ret;
+
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock,flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Fill the phyxcer register into REG_0C */
+ iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+ iow(db, DM9000_EPCR, EPCR_ERPRR | EPCR_EPOS); /* Issue phyxcer read command */
+
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock,flags);
+
+ dm9000_msleep(db, 1); /* Wait read complete */
+
+ spin_lock_irqsave(&db->lock,flags);
+ reg_save = readb(db->io_addr);
+
+ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer read command */
+
+ /* The read data keeps on REG_0D & REG_0E */
+ ret = (ior(db, DM9000_EPDRH) << 8) | ior(db, DM9000_EPDRL);
+
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock,flags);
+
+ mutex_unlock(&db->addr_lock);
+
+ dm9000_dbg(db, 5, "phy_read[%02x] -> %04x\n", reg, ret);
+ return ret;
+}
+
+/*
+ * Write a word to phyxcer
+ */
+static void
+dm9000_phy_write(struct net_device *dev,
+ int phyaddr_unused, int reg, int value)
+{
+ board_info_t *db = netdev_priv(dev);
+ unsigned long flags;
+ unsigned long reg_save;
+
+ dm9000_dbg(db, 5, "phy_write[%02x] = %04x\n", reg, value);
+ mutex_lock(&db->addr_lock);
+
+ spin_lock_irqsave(&db->lock,flags);
+
+ /* Save previous register address */
+ reg_save = readb(db->io_addr);
+
+ /* Fill the phyxcer register into REG_0C */
+ iow(db, DM9000_EPAR, DM9000_PHY | reg);
+
+ /* Fill the written data into REG_0D & REG_0E */
+ iow(db, DM9000_EPDRL, value);
+ iow(db, DM9000_EPDRH, value >> 8);
+
+ iow(db, DM9000_EPCR, EPCR_EPOS | EPCR_ERPRW); /* Issue phyxcer write command */
+
+ writeb(reg_save, db->io_addr);
+ spin_unlock_irqrestore(&db->lock, flags);
+
+ dm9000_msleep(db, 1); /* Wait write complete */
+
+ spin_lock_irqsave(&db->lock,flags);
+ reg_save = readb(db->io_addr);
+
+ iow(db, DM9000_EPCR, 0x0); /* Clear phyxcer write command */
+
+ /* restore the previous address */
+ writeb(reg_save, db->io_addr);
+
+ spin_unlock_irqrestore(&db->lock, flags);
+ mutex_unlock(&db->addr_lock);
+}
+
static void
dm9000_shutdown(struct net_device *dev)
{
@@ -1503,12 +1502,7 @@ dm9000_probe(struct platform_device *pdev)
db->flags |= DM9000_PLATF_SIMPLE_PHY;
#endif
- /* Fixing bug on dm9000_probe, takeover dm9000_reset(db),
- * Need 'NCR_MAC_LBK' bit to indeed stable our DM9000 fifo
- * while probe stage.
- */
-
- iow(db, DM9000_NCR, NCR_MAC_LBK | NCR_RST);
+ dm9000_reset(db);
/* try multiple times, DM9000 sometimes gets the read wrong */
for (i = 0; i < 8; i++) {
diff --git a/trunk/drivers/net/ethernet/davicom/dm9000.h b/trunk/drivers/net/ethernet/davicom/dm9000.h
index 9ce058adabab..55688bd1a3ef 100644
--- a/trunk/drivers/net/ethernet/davicom/dm9000.h
+++ b/trunk/drivers/net/ethernet/davicom/dm9000.h
@@ -69,9 +69,7 @@
#define NCR_WAKEEN (1<<6)
#define NCR_FCOL (1<<4)
#define NCR_FDX (1<<3)
-
-#define NCR_RESERVED (3<<1)
-#define NCR_MAC_LBK (1<<1)
+#define NCR_LBK (3<<1)
#define NCR_RST (1<<0)
#define NSR_SPEED (1<<7)
@@ -169,12 +167,5 @@
#define ISR_LNKCHNG (1<<5)
#define ISR_UNDERRUN (1<<4)
-/* Davicom MII registers.
- */
-
-#define MII_DM_DSPCR 0x1b /* DSP Control Register */
-
-#define DSPCR_INIT_PARAM 0xE100 /* DSP init parameter */
-
#endif /* _DM9000X_H_ */
diff --git a/trunk/drivers/net/ethernet/emulex/benet/be_main.c b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
index 2886c9b63f90..08e54f3d288b 100644
--- a/trunk/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/trunk/drivers/net/ethernet/emulex/benet/be_main.c
@@ -759,9 +759,8 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
if (vlan_tx_tag_present(skb)) {
vlan_tag = be_get_tx_vlan_tag(adapter, skb);
- skb = __vlan_put_tag(skb, vlan_tag);
- if (skb)
- skb->vlan_tci = 0;
+ __vlan_put_tag(skb, vlan_tag);
+ skb->vlan_tci = 0;
}
return skb;
diff --git a/trunk/drivers/net/ethernet/freescale/fec.c b/trunk/drivers/net/ethernet/freescale/fec.c
index 73195f643c9c..911d0253dbb2 100644
--- a/trunk/drivers/net/ethernet/freescale/fec.c
+++ b/trunk/drivers/net/ethernet/freescale/fec.c
@@ -345,53 +345,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
-/* Init RX & TX buffer descriptors
- */
-static void fec_enet_bd_init(struct net_device *dev)
-{
- struct fec_enet_private *fep = netdev_priv(dev);
- struct bufdesc *bdp;
- unsigned int i;
-
- /* Initialize the receive buffer descriptors. */
- bdp = fep->rx_bd_base;
- for (i = 0; i < RX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- if (bdp->cbd_bufaddr)
- bdp->cbd_sc = BD_ENET_RX_EMPTY;
- else
- bdp->cbd_sc = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- }
-
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
- bdp->cbd_sc |= BD_SC_WRAP;
-
- fep->cur_rx = fep->rx_bd_base;
-
- /* ...and the same for transmit */
- bdp = fep->tx_bd_base;
- fep->cur_tx = bdp;
- for (i = 0; i < TX_RING_SIZE; i++) {
-
- /* Initialize the BD for every fragment in the page. */
- bdp->cbd_sc = 0;
- if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
- dev_kfree_skb_any(fep->tx_skbuff[i]);
- fep->tx_skbuff[i] = NULL;
- }
- bdp->cbd_bufaddr = 0;
- bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
- }
-
- /* Set the last buffer to wrap */
- bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
- bdp->cbd_sc |= BD_SC_WRAP;
- fep->dirty_tx = bdp;
-}
-
/* This function is called to start or restart the FEC during a link
* change. This only happens when switching between half and full
* duplex.
@@ -435,8 +388,6 @@ fec_restart(struct net_device *ndev, int duplex)
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
- fec_enet_bd_init(ndev);
-
/* Set receive and transmit descriptor base. */
writel(fep->bd_dma, fep->hwp + FEC_R_DES_START);
if (fep->bufdesc_ex)
@@ -446,6 +397,7 @@ fec_restart(struct net_device *ndev, int duplex)
writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc)
* RX_RING_SIZE, fep->hwp + FEC_X_DES_START);
+ fep->cur_rx = fep->rx_bd_base;
for (i = 0; i <= TX_RING_MOD_MASK; i++) {
if (fep->tx_skbuff[i]) {
@@ -1002,7 +954,6 @@ static void fec_enet_adjust_link(struct net_device *ndev)
} else {
if (fep->link) {
fec_stop(ndev);
- fep->link = phy_dev->link;
status_change = 1;
}
}
@@ -1646,6 +1597,8 @@ static int fec_enet_init(struct net_device *ndev)
{
struct fec_enet_private *fep = netdev_priv(ndev);
struct bufdesc *cbd_base;
+ struct bufdesc *bdp;
+ unsigned int i;
/* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
@@ -1655,7 +1608,6 @@ static int fec_enet_init(struct net_device *ndev)
return -ENOMEM;
}
- memset(cbd_base, 0, PAGE_SIZE);
spin_lock_init(&fep->hw_lock);
fep->netdev = ndev;
@@ -1679,6 +1631,35 @@ static int fec_enet_init(struct net_device *ndev)
writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, FEC_NAPI_WEIGHT);
+ /* Initialize the receive buffer descriptors. */
+ bdp = fep->rx_bd_base;
+ for (i = 0; i < RX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp->cbd_sc |= BD_SC_WRAP;
+
+ /* ...and the same for transmit */
+ bdp = fep->tx_bd_base;
+ fep->cur_tx = bdp;
+ for (i = 0; i < TX_RING_SIZE; i++) {
+
+ /* Initialize the BD for every fragment in the page. */
+ bdp->cbd_sc = 0;
+ bdp->cbd_bufaddr = 0;
+ bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
+ }
+
+ /* Set the last buffer to wrap */
+ bdp = fec_enet_get_prevdesc(bdp, fep->bufdesc_ex);
+ bdp->cbd_sc |= BD_SC_WRAP;
+ fep->dirty_tx = bdp;
+
fec_restart(ndev, 0);
return 0;
diff --git a/trunk/drivers/net/ethernet/intel/e100.c b/trunk/drivers/net/ethernet/intel/e100.c
index d2bea3f07c73..ec800b093e7e 100644
--- a/trunk/drivers/net/ethernet/intel/e100.c
+++ b/trunk/drivers/net/ethernet/intel/e100.c
@@ -870,7 +870,7 @@ static int e100_exec_cmd(struct nic *nic, u8 cmd, dma_addr_t dma_addr)
}
static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
- int (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
+ void (*cb_prepare)(struct nic *, struct cb *, struct sk_buff *))
{
struct cb *cb;
unsigned long flags;
@@ -888,13 +888,10 @@ static int e100_exec_cb(struct nic *nic, struct sk_buff *skb,
nic->cbs_avail--;
cb->skb = skb;
- err = cb_prepare(nic, cb, skb);
- if (err)
- goto err_unlock;
-
if (unlikely(!nic->cbs_avail))
err = -ENOSPC;
+ cb_prepare(nic, cb, skb);
/* Order is important otherwise we'll be in a race with h/w:
* set S-bit in current first, then clear S-bit in previous. */
@@ -1094,7 +1091,7 @@ static void e100_get_defaults(struct nic *nic)
nic->mii.mdio_write = mdio_write;
}
-static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct config *config = &cb->u.config;
u8 *c = (u8 *)config;
@@ -1184,7 +1181,6 @@ static int e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
netif_printk(nic, hw, KERN_DEBUG, nic->netdev,
"[16-23]=%02X:%02X:%02X:%02X:%02X:%02X:%02X:%02X\n",
c[16], c[17], c[18], c[19], c[20], c[21], c[22], c[23]);
- return 0;
}
/*************************************************************************
@@ -1335,7 +1331,7 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
return fw;
}
-static int e100_setup_ucode(struct nic *nic, struct cb *cb,
+static void e100_setup_ucode(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
const struct firmware *fw = (void *)skb;
@@ -1362,7 +1358,6 @@ static int e100_setup_ucode(struct nic *nic, struct cb *cb,
cb->u.ucode[min_size] |= cpu_to_le32((BUNDLESMALL) ? 0xFFFF : 0xFF80);
cb->command = cpu_to_le16(cb_ucode | cb_el);
- return 0;
}
static inline int e100_load_ucode_wait(struct nic *nic)
@@ -1405,20 +1400,18 @@ static inline int e100_load_ucode_wait(struct nic *nic)
return err;
}
-static int e100_setup_iaaddr(struct nic *nic, struct cb *cb,
+static void e100_setup_iaaddr(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_iaaddr);
memcpy(cb->u.iaaddr, nic->netdev->dev_addr, ETH_ALEN);
- return 0;
}
-static int e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_dump(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
cb->command = cpu_to_le16(cb_dump);
cb->u.dump_buffer_addr = cpu_to_le32(nic->dma_addr +
offsetof(struct mem, dump_buf));
- return 0;
}
static int e100_phy_check_without_mii(struct nic *nic)
@@ -1588,7 +1581,7 @@ static int e100_hw_init(struct nic *nic)
return 0;
}
-static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
+static void e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
{
struct net_device *netdev = nic->netdev;
struct netdev_hw_addr *ha;
@@ -1603,7 +1596,6 @@ static int e100_multi(struct nic *nic, struct cb *cb, struct sk_buff *skb)
memcpy(&cb->u.multi.addr[i++ * ETH_ALEN], &ha->addr,
ETH_ALEN);
}
- return 0;
}
static void e100_set_multicast_list(struct net_device *netdev)
@@ -1764,18 +1756,11 @@ static void e100_watchdog(unsigned long data)
round_jiffies(jiffies + E100_WATCHDOG_PERIOD));
}
-static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
+static void e100_xmit_prepare(struct nic *nic, struct cb *cb,
struct sk_buff *skb)
{
- dma_addr_t dma_addr;
cb->command = nic->tx_command;
- dma_addr = pci_map_single(nic->pdev,
- skb->data, skb->len, PCI_DMA_TODEVICE);
- /* If we can't map the skb, have the upper layer try later */
- if (pci_dma_mapping_error(nic->pdev, dma_addr))
- return -ENOMEM;
-
/*
* Use the last 4 bytes of the SKB payload packet as the CRC, used for
* testing, ie sending frames with bad CRC.
@@ -1792,10 +1777,11 @@ static int e100_xmit_prepare(struct nic *nic, struct cb *cb,
cb->u.tcb.tcb_byte_count = 0;
cb->u.tcb.threshold = nic->tx_threshold;
cb->u.tcb.tbd_count = 1;
- cb->u.tcb.tbd.buf_addr = cpu_to_le32(dma_addr);
+ cb->u.tcb.tbd.buf_addr = cpu_to_le32(pci_map_single(nic->pdev,
+ skb->data, skb->len, PCI_DMA_TODEVICE));
+ /* check for mapping failure? */
cb->u.tcb.tbd.size = cpu_to_le16(skb->len);
skb_tx_timestamp(skb);
- return 0;
}
static netdev_tx_t e100_xmit_frame(struct sk_buff *skb,
diff --git a/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index ffd287196bf8..43462d596a4e 100644
--- a/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/trunk/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -1053,10 +1053,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
txdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (dma_mapping_error(&pdev->dev, txdr->buffer_info[i].dma)) {
- ret_val = 4;
- goto err_nomem;
- }
tx_desc->buffer_addr = cpu_to_le64(txdr->buffer_info[i].dma);
tx_desc->lower.data = cpu_to_le32(skb->len);
tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
@@ -1073,7 +1069,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info = kcalloc(rxdr->count, sizeof(struct e1000_buffer),
GFP_KERNEL);
if (!rxdr->buffer_info) {
- ret_val = 5;
+ ret_val = 4;
goto err_nomem;
}
@@ -1081,7 +1077,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL);
if (!rxdr->desc) {
- ret_val = 6;
+ ret_val = 5;
goto err_nomem;
}
memset(rxdr->desc, 0, rxdr->size);
@@ -1105,7 +1101,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN, GFP_KERNEL);
if (!skb) {
- ret_val = 7;
+ ret_val = 6;
goto err_nomem;
}
skb_reserve(skb, NET_IP_ALIGN);
@@ -1114,10 +1110,6 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
rxdr->buffer_info[i].dma =
dma_map_single(&pdev->dev, skb->data,
E1000_RXBUFFER_2048, DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, rxdr->buffer_info[i].dma)) {
- ret_val = 8;
- goto err_nomem;
- }
rx_desc->buffer_addr = cpu_to_le64(rxdr->buffer_info[i].dma);
memset(skb->data, 0x00, skb->len);
}
diff --git a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c
index 7e615e2bf7e6..948b86ffa4f0 100644
--- a/trunk/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/trunk/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -848,16 +848,11 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
}
}
- if (!buffer_info->dma) {
+ if (!buffer_info->dma)
buffer_info->dma = dma_map_page(&pdev->dev,
buffer_info->page, 0,
PAGE_SIZE,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- adapter->alloc_rx_buff_failed++;
- break;
- }
- }
rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
diff --git a/trunk/drivers/net/ethernet/intel/igb/igb.h b/trunk/drivers/net/ethernet/intel/igb/igb.h
index ab577a763a20..25151401c2ab 100644
--- a/trunk/drivers/net/ethernet/intel/igb/igb.h
+++ b/trunk/drivers/net/ethernet/intel/igb/igb.h
@@ -284,10 +284,18 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+ IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};
+#define ring_uses_build_skb(ring) \
+ test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define set_ring_build_skb_enabled(ring) \
+ set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+#define clear_ring_build_skb_enabled(ring) \
+ clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
+
#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
#define IGB_RX_DESC(R, i) \
diff --git a/trunk/drivers/net/ethernet/intel/igb/igb_main.c b/trunk/drivers/net/ethernet/intel/igb/igb_main.c
index 64f75291e3a5..8496adfc6a68 100644
--- a/trunk/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/trunk/drivers/net/ethernet/intel/igb/igb_main.c
@@ -3350,6 +3350,20 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}
+static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
+ struct igb_ring *rx_ring)
+{
+#define IGB_MAX_BUILD_SKB_SIZE \
+ (SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
+ (NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))
+
+ /* set build_skb flag */
+ if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
+ set_ring_build_skb_enabled(rx_ring);
+ else
+ clear_ring_build_skb_enabled(rx_ring);
+}
+
/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3369,8 +3383,11 @@ static void igb_configure_rx(struct igb_adapter *adapter)
/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
- for (i = 0; i < adapter->num_rx_queues; i++)
- igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+ for (i = 0; i < adapter->num_rx_queues; i++) {
+ struct igb_ring *rx_ring = adapter->rx_ring[i];
+ igb_set_rx_buffer_len(adapter, rx_ring);
+ igb_configure_rx_ring(adapter, rx_ring);
+ }
}
/**
@@ -6186,6 +6203,78 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}
+static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
+ union e1000_adv_rx_desc *rx_desc)
+{
+ struct igb_rx_buffer *rx_buffer;
+ struct sk_buff *skb;
+ struct page *page;
+ void *page_addr;
+ unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
+#if (PAGE_SIZE < 8192)
+ unsigned int truesize = IGB_RX_BUFSZ;
+#else
+ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+ SKB_DATA_ALIGN(NET_SKB_PAD +
+ NET_IP_ALIGN +
+ size);
+#endif
+
+ /* If we spanned a buffer we have a huge mess so test for it */
+ BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));
+
+ rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+ page = rx_buffer->page;
+ prefetchw(page);
+
+ page_addr = page_address(page) + rx_buffer->page_offset;
+
+ /* prefetch first cache line of first page */
+ prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
+#if L1_CACHE_BYTES < 128
+ prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
+#endif
+
+ /* build an skb around the page buffer */
+ skb = build_skb(page_addr, truesize);
+ if (unlikely(!skb)) {
+ rx_ring->rx_stats.alloc_failed++;
+ return NULL;
+ }
+
+ /* we are reusing so sync this buffer for CPU use */
+ dma_sync_single_range_for_cpu(rx_ring->dev,
+ rx_buffer->dma,
+ rx_buffer->page_offset,
+ IGB_RX_BUFSZ,
+ DMA_FROM_DEVICE);
+
+ /* update pointers within the skb to store the data */
+ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
+ __skb_put(skb, size);
+
+ /* pull timestamp out of packet data */
+ if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
+ igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
+ __skb_pull(skb, IGB_TS_HDR_LEN);
+ }
+
+ if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
+ /* hand second half of page back to the ring */
+ igb_reuse_rx_page(rx_ring, rx_buffer);
+ } else {
+ /* we are not reusing the buffer so unmap it */
+ dma_unmap_page(rx_ring->dev, rx_buffer->dma,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ }
+
+ /* clear contents of buffer_info */
+ rx_buffer->dma = 0;
+ rx_buffer->page = NULL;
+
+ return skb;
+}
+
static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -6601,7 +6690,10 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rmb();
/* retrieve a buffer from the ring */
- skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
+ if (ring_uses_build_skb(rx_ring))
+ skb = igb_build_rx_buffer(rx_ring, rx_desc);
+ else
+ skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -6688,6 +6780,14 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}
+static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
+{
+ if (ring_uses_build_skb(rx_ring))
+ return NET_SKB_PAD + NET_IP_ALIGN;
+ else
+ return 0;
+}
+
/**
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -6714,7 +6814,9 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
- rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);
+ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
+ bi->page_offset +
+ igb_rx_offset(rx_ring));
rx_desc++;
bi++;
diff --git a/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index b5f94abe3cff..ea4808373435 100644
--- a/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/trunk/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -2159,10 +2159,6 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
skb->data,
adapter->rx_buffer_len,
DMA_FROM_DEVICE);
- if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
- adapter->alloc_rx_buff_failed++;
- break;
- }
rx_desc = IXGB_RX_DESC(*rx_ring, i);
rx_desc->buff_addr = cpu_to_le64(buffer_info->dma);
@@ -2172,8 +2168,7 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
rx_desc->status = 0;
- if (++i == rx_ring->count)
- i = 0;
+ if (++i == rx_ring->count) i = 0;
buffer_info = &rx_ring->buffer_info[i];
}
diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 79f4a26ea6cc..db5611ae407e 100644
--- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -7922,19 +7922,12 @@ static int __init ixgbe_init_module(void)
ixgbe_dbg_init();
#endif /* CONFIG_DEBUG_FS */
- ret = pci_register_driver(&ixgbe_driver);
- if (ret) {
-#ifdef CONFIG_DEBUG_FS
- ixgbe_dbg_exit();
-#endif /* CONFIG_DEBUG_FS */
- return ret;
- }
-
#ifdef CONFIG_IXGBE_DCA
dca_register_notify(&dca_notifier);
#endif
- return 0;
+ ret = pci_register_driver(&ixgbe_driver);
+ return ret;
}
module_init(ixgbe_init_module);
diff --git a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index 97e33669c0b9..d44b4d21268c 100644
--- a/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/trunk/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1049,12 +1049,6 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (vlan || qos) {
- if (adapter->vfinfo[vf].pf_vlan)
- err = ixgbe_set_vf_vlan(adapter, false,
- adapter->vfinfo[vf].pf_vlan,
- vf);
- if (err)
- goto out;
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;
diff --git a/trunk/drivers/net/ethernet/marvell/Kconfig b/trunk/drivers/net/ethernet/marvell/Kconfig
index 434e33c527df..edfba9370922 100644
--- a/trunk/drivers/net/ethernet/marvell/Kconfig
+++ b/trunk/drivers/net/ethernet/marvell/Kconfig
@@ -33,7 +33,6 @@ config MV643XX_ETH
config MVMDIO
tristate "Marvell MDIO interface support"
- select PHYLIB
---help---
This driver supports the MDIO interface found in the network
interface units of the Marvell EBU SoCs (Kirkwood, Orion5x,
@@ -46,6 +45,7 @@ config MVMDIO
config MVNETA
tristate "Marvell Armada 370/XP network interface support"
depends on MACH_ARMADA_370_XP
+ select PHYLIB
select MVMDIO
---help---
This driver supports the network interface units in the
diff --git a/trunk/drivers/net/ethernet/marvell/mvneta.c b/trunk/drivers/net/ethernet/marvell/mvneta.c
index a47a097c21e1..cd345b8969bc 100644
--- a/trunk/drivers/net/ethernet/marvell/mvneta.c
+++ b/trunk/drivers/net/ethernet/marvell/mvneta.c
@@ -374,6 +374,7 @@ static int rxq_number = 8;
static int txq_number = 8;
static int rxq_def;
+static int txq_def;
#define MVNETA_DRIVER_NAME "mvneta"
#define MVNETA_DRIVER_VERSION "1.0"
@@ -1474,8 +1475,7 @@ static int mvneta_tx_frag_process(struct mvneta_port *pp, struct sk_buff *skb,
static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- u16 txq_id = skb_get_queue_mapping(skb);
- struct mvneta_tx_queue *txq = &pp->txqs[txq_id];
+ struct mvneta_tx_queue *txq = &pp->txqs[txq_def];
struct mvneta_tx_desc *tx_desc;
struct netdev_queue *nq;
int frags = 0;
@@ -1485,7 +1485,7 @@ static int mvneta_tx(struct sk_buff *skb, struct net_device *dev)
goto out;
frags = skb_shinfo(skb)->nr_frags + 1;
- nq = netdev_get_tx_queue(dev, txq_id);
+ nq = netdev_get_tx_queue(dev, txq_def);
/* Get a descriptor for the first part of the packet */
tx_desc = mvneta_txq_next_desc_get(txq);
@@ -2689,7 +2689,7 @@ static int mvneta_probe(struct platform_device *pdev)
return -EINVAL;
}
- dev = alloc_etherdev_mqs(sizeof(struct mvneta_port), txq_number, rxq_number);
+ dev = alloc_etherdev_mq(sizeof(struct mvneta_port), 8);
if (!dev)
return -ENOMEM;
@@ -2771,17 +2771,16 @@ static int mvneta_probe(struct platform_device *pdev)
netif_napi_add(dev, &pp->napi, mvneta_poll, pp->weight);
- dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->vlan_features |= NETIF_F_SG | NETIF_F_IP_CSUM;
- dev->priv_flags |= IFF_UNICAST_FLT;
-
err = register_netdev(dev);
if (err < 0) {
dev_err(&pdev->dev, "failed to register\n");
goto err_deinit;
}
+ dev->features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM;
+ dev->priv_flags |= IFF_UNICAST_FLT;
+
netdev_info(dev, "mac: %pM\n", dev->dev_addr);
platform_set_drvdata(pdev, pp->dev);
@@ -2844,3 +2843,4 @@ module_param(rxq_number, int, S_IRUGO);
module_param(txq_number, int, S_IRUGO);
module_param(rxq_def, int, S_IRUGO);
+module_param(txq_def, int, S_IRUGO);
diff --git a/trunk/drivers/net/ethernet/marvell/sky2.c b/trunk/drivers/net/ethernet/marvell/sky2.c
index 6a0e671fcecd..fc07ca35721b 100644
--- a/trunk/drivers/net/ethernet/marvell/sky2.c
+++ b/trunk/drivers/net/ethernet/marvell/sky2.c
@@ -1067,7 +1067,7 @@ static void sky2_ramset(struct sky2_hw *hw, u16 q, u32 start, u32 space)
sky2_write32(hw, RB_ADDR(q, RB_RX_UTHP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTHP), space/2);
- tp = space - 8192/8;
+ tp = space - 2048/8;
sky2_write32(hw, RB_ADDR(q, RB_RX_UTPP), tp);
sky2_write32(hw, RB_ADDR(q, RB_RX_LTPP), space/4);
} else {
diff --git a/trunk/drivers/net/ethernet/marvell/sky2.h b/trunk/drivers/net/ethernet/marvell/sky2.h
index ec6dcd80152b..615ac63ea860 100644
--- a/trunk/drivers/net/ethernet/marvell/sky2.h
+++ b/trunk/drivers/net/ethernet/marvell/sky2.h
@@ -2074,7 +2074,7 @@ enum {
GM_IS_RX_FF_OR = 1<<1, /* Receive FIFO Overrun */
GM_IS_RX_COMPL = 1<<0, /* Frame Reception Complete */
-#define GMAC_DEF_MSK (GM_IS_TX_FF_UR | GM_IS_RX_FF_OR)
+#define GMAC_DEF_MSK GM_IS_TX_FF_UR
};
/* GMAC_LINK_CTRL 16 bit GMAC Link Control Reg (YUKON only) */
diff --git a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 30d78f806dc3..f278b10ef714 100644
--- a/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/trunk/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -411,8 +411,8 @@ static int mlx4_en_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
static void mlx4_en_u64_to_mac(unsigned char dst_mac[ETH_ALEN + 2], u64 src_mac)
{
- int i;
- for (i = ETH_ALEN - 1; i >= 0; --i) {
+ unsigned int i;
+ for (i = ETH_ALEN - 1; i; --i) {
dst_mac[i] = src_mac & 0xff;
src_mac >>= 8;
}
diff --git a/trunk/drivers/net/ethernet/micrel/ks8851.c b/trunk/drivers/net/ethernet/micrel/ks8851.c
index 8fb481252e2c..33bcb63d56a2 100644
--- a/trunk/drivers/net/ethernet/micrel/ks8851.c
+++ b/trunk/drivers/net/ethernet/micrel/ks8851.c
@@ -528,7 +528,7 @@ static void ks8851_rx_pkts(struct ks8851_net *ks)
for (; rxfc != 0; rxfc--) {
rxh = ks8851_rdreg32(ks, KS_RXFHSR);
rxstat = rxh & 0xffff;
- rxlen = (rxh >> 16) & 0xfff;
+ rxlen = rxh >> 16;
netif_dbg(ks, rx_status, ks->netdev,
"rx: stat 0x%04x, len 0x%04x\n", rxstat, rxlen);
diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index edd63f1230f3..cd5ae8813cb3 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -1500,12 +1500,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
}
} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
- /* Make sure carrier is off and queue is stopped during loopback */
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_stop_queue(netdev);
- }
-
ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2786,7 +2780,6 @@ static u64 *qlcnic_83xx_fill_stats(struct qlcnic_adapter *adapter,
void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
{
struct qlcnic_cmd_args cmd;
- struct net_device *netdev = adapter->netdev;
int ret = 0;
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_STATISTICS);
@@ -2796,7 +2789,7 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_TX, &ret);
if (ret) {
- netdev_err(netdev, "Error getting Tx stats\n");
+ dev_info(&adapter->pdev->dev, "Error getting MAC stats\n");
goto out;
}
/* Get MAC stats */
@@ -2806,7 +2799,8 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_MAC, &ret);
if (ret) {
- netdev_err(netdev, "Error getting MAC stats\n");
+ dev_info(&adapter->pdev->dev,
+ "Error getting Rx stats\n");
goto out;
}
/* Get Rx stats */
@@ -2816,7 +2810,8 @@ void qlcnic_83xx_get_stats(struct qlcnic_adapter *adapter, u64 *data)
data = qlcnic_83xx_fill_stats(adapter, &cmd, data,
QLC_83XX_STAT_RX, &ret);
if (ret)
- netdev_err(netdev, "Error getting Rx stats\n");
+ dev_info(&adapter->pdev->dev,
+ "Error getting Tx stats\n");
out:
qlcnic_free_mbx_args(&cmd);
}
diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 5fa847fe388a..0e630061bff3 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -358,7 +358,8 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter,
memcpy(&first_desc->eth_addr, skb->data, ETH_ALEN);
}
opcode = TX_ETHER_PKT;
- if (skb_is_gso(skb)) {
+ if ((adapter->netdev->features & (NETIF_F_TSO | NETIF_F_TSO6)) &&
+ skb_shinfo(skb)->gso_size > 0) {
hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
first_desc->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
first_desc->total_hdr_length = hdr_len;
diff --git a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 5ef328af61d0..987fb6f8adc3 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -200,10 +200,10 @@ static ssize_t qlcnic_store_beacon(struct device *dev,
}
err = qlcnic_config_led(adapter, b_state, b_rate);
- if (!err) {
+ if (!err)
err = len;
+ else
ahw->beacon_state = b_state;
- }
if (test_and_clear_bit(__QLCNIC_DIAG_RES_ALLOC, &adapter->state))
qlcnic_diag_free_res(adapter->netdev, max_sds_rings);
diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge.h b/trunk/drivers/net/ethernet/qlogic/qlge/qlge.h
index 7e8d68263963..a131d7b5d2fe 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "v1.00.00.32"
+#define DRV_VERSION "v1.00.00.31"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 0780e039b271..6f316ab23257 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -379,13 +379,13 @@ static int ql_get_settings(struct net_device *ndev,
ecmd->supported = SUPPORTED_10000baseT_Full;
ecmd->advertising = ADVERTISED_10000baseT_Full;
+ ecmd->autoneg = AUTONEG_ENABLE;
ecmd->transceiver = XCVR_EXTERNAL;
if ((qdev->link_status & STS_LINK_TYPE_MASK) ==
STS_LINK_TYPE_10GBASET) {
ecmd->supported |= (SUPPORTED_TP | SUPPORTED_Autoneg);
ecmd->advertising |= (ADVERTISED_TP | ADVERTISED_Autoneg);
ecmd->port = PORT_TP;
- ecmd->autoneg = AUTONEG_ENABLE;
} else {
ecmd->supported |= SUPPORTED_FIBRE;
ecmd->advertising |= ADVERTISED_FIBRE;
diff --git a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 8033555e53c2..b13ab544a7eb 100644
--- a/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/trunk/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -1434,13 +1434,11 @@ static int ql_map_send(struct ql_adapter *qdev,
}
/* Categorizing receive firmware frame errors */
-static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err,
- struct rx_ring *rx_ring)
+static void ql_categorize_rx_err(struct ql_adapter *qdev, u8 rx_err)
{
struct nic_stats *stats = &qdev->nic_stats;
stats->rx_err_count++;
- rx_ring->rx_errors++;
switch (rx_err & IB_MAC_IOCB_RSP_ERR_MASK) {
case IB_MAC_IOCB_RSP_ERR_CODE_ERR:
@@ -1476,12 +1474,6 @@ static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
struct napi_struct *napi = &rx_ring->napi;
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- put_page(lbq_desc->p.pg_chunk.page);
- return;
- }
napi->dev = qdev->ndev;
skb = napi_get_frags(napi);
@@ -1537,12 +1529,6 @@ static void ql_process_mac_rx_page(struct ql_adapter *qdev,
addr = lbq_desc->p.pg_chunk.va;
prefetch(addr);
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- goto err_out;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -1628,13 +1614,6 @@ static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
memcpy(skb_put(new_skb, length), skb->data, length);
skb = new_skb;
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
-
/* loopback self test for ethtool */
if (test_bit(QL_SELFTEST, &qdev->flags)) {
ql_check_lb_frame(qdev, skb);
@@ -1940,13 +1919,6 @@ static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
return;
}
- /* Frame error, so drop the packet. */
- if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
- ql_categorize_rx_err(qdev, ib_mac_rsp->flags2, rx_ring);
- dev_kfree_skb_any(skb);
- return;
- }
-
/* The max framesize filter on this chip is set higher than
* MTU since FCoE uses 2k frames.
*/
@@ -2028,6 +2000,12 @@ static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
+ /* Frame error, so drop the packet. */
+ if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
+ ql_categorize_rx_err(qdev, ib_mac_rsp->flags2);
+ return (unsigned long)length;
+ }
+
if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
/* The data and headers are split into
* separate buffers.
diff --git a/trunk/drivers/net/ethernet/realtek/r8169.c b/trunk/drivers/net/ethernet/realtek/r8169.c
index 4ecbe64a758d..28fb50a1e9c3 100644
--- a/trunk/drivers/net/ethernet/realtek/r8169.c
+++ b/trunk/drivers/net/ethernet/realtek/r8169.c
@@ -3818,30 +3818,6 @@ static void rtl_init_mdio_ops(struct rtl8169_private *tp)
}
}
-static void rtl_speed_down(struct rtl8169_private *tp)
-{
- u32 adv;
- int lpa;
-
- rtl_writephy(tp, 0x1f, 0x0000);
- lpa = rtl_readphy(tp, MII_LPA);
-
- if (lpa & (LPA_10HALF | LPA_10FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
- else if (lpa & (LPA_100HALF | LPA_100FULL))
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
- else
- adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
- ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
- (tp->mii.supports_gmii ?
- ADVERTISED_1000baseT_Half |
- ADVERTISED_1000baseT_Full : 0);
-
- rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
- adv);
-}
-
static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
{
void __iomem *ioaddr = tp->mmio_addr;
@@ -3872,7 +3848,9 @@ static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
return false;
- rtl_speed_down(tp);
+ rtl_writephy(tp, 0x1f, 0x0000);
+ rtl_writephy(tp, MII_BMCR, 0x0000);
+
rtl_wol_suspend_quirk(tp);
return true;
diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.c b/trunk/drivers/net/ethernet/renesas/sh_eth.c
index 6ed333fe5c04..bf5e3cf97c4d 100644
--- a/trunk/drivers/net/ethernet/renesas/sh_eth.c
+++ b/trunk/drivers/net/ethernet/renesas/sh_eth.c
@@ -1216,7 +1216,10 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
if (felic_stat & ECSR_LCHNG) {
/* Link Changed */
if (mdp->cd->no_psr || mdp->no_ether_link) {
- goto ignore_link;
+ if (mdp->link == PHY_DOWN)
+ link_stat = 0;
+ else
+ link_stat = PHY_ST_LINK;
} else {
link_stat = (sh_eth_read(ndev, PSR));
if (mdp->ether_link_active_low)
@@ -1239,7 +1242,6 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
}
}
-ignore_link:
if (intr_status & EESR_TWB) {
/* Write buck end. unused write back interrupt */
if (intr_status & EESR_TABT) /* Transmit Abort int */
@@ -1324,18 +1326,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
struct sh_eth_private *mdp = netdev_priv(ndev);
struct sh_eth_cpu_data *cd = mdp->cd;
irqreturn_t ret = IRQ_NONE;
- unsigned long intr_status;
+ u32 intr_status = 0;
spin_lock(&mdp->lock);
- /* Get interrupt status */
+ /* Get interrpt stat */
intr_status = sh_eth_read(ndev, EESR);
- /* Mask it with the interrupt mask, forcing ECI interrupt to be always
- * enabled since it's the one that comes thru regardless of the mask,
- * and we need to fully handle it in sh_eth_error() in order to quench
- * it as it doesn't get cleared by just writing 1 to the ECI bit...
- */
- intr_status &= sh_eth_read(ndev, EESIPR) | DMAC_M_ECI;
/* Clear interrupt */
if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
@@ -1377,7 +1373,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
struct phy_device *phydev = mdp->phydev;
int new_state = 0;
- if (phydev->link) {
+ if (phydev->link != PHY_DOWN) {
if (phydev->duplex != mdp->duplex) {
new_state = 1;
mdp->duplex = phydev->duplex;
@@ -1391,21 +1387,17 @@ static void sh_eth_adjust_link(struct net_device *ndev)
if (mdp->cd->set_rate)
mdp->cd->set_rate(ndev);
}
- if (!mdp->link) {
+ if (mdp->link == PHY_DOWN) {
sh_eth_write(ndev,
(sh_eth_read(ndev, ECMR) & ~ECMR_TXF), ECMR);
new_state = 1;
mdp->link = phydev->link;
- if (mdp->cd->no_psr || mdp->no_ether_link)
- sh_eth_rcv_snd_enable(ndev);
}
} else if (mdp->link) {
new_state = 1;
- mdp->link = 0;
+ mdp->link = PHY_DOWN;
mdp->speed = 0;
mdp->duplex = -1;
- if (mdp->cd->no_psr || mdp->no_ether_link)
- sh_eth_rcv_snd_disable(ndev);
}
if (new_state && netif_msg_link(mdp))
@@ -1422,7 +1414,7 @@ static int sh_eth_phy_init(struct net_device *ndev)
snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
mdp->mii_bus->id , mdp->phy_id);
- mdp->link = 0;
+ mdp->link = PHY_DOWN;
mdp->speed = 0;
mdp->duplex = -1;
diff --git a/trunk/drivers/net/ethernet/renesas/sh_eth.h b/trunk/drivers/net/ethernet/renesas/sh_eth.h
index 828be4515008..e6655678458e 100644
--- a/trunk/drivers/net/ethernet/renesas/sh_eth.h
+++ b/trunk/drivers/net/ethernet/renesas/sh_eth.h
@@ -723,7 +723,7 @@ struct sh_eth_private {
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
struct phy_device *phydev; /* PHY device control */
- int link;
+ enum phy_state link;
phy_interface_t phy_interface;
int msg_enable;
int speed;
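
The sh_eth adjust_link changes above boil down to comparing the PHY-reported duplex, speed and up/down state against the driver's cached copy and raising a flag on any difference. A standalone model of that bookkeeping, using plain 0 where the patch uses PHY_DOWN (struct and function names here are invented for illustration; the real driver also pokes the ECMR register on the transitions):

    #include <stdbool.h>
    #include <stdio.h>

    struct link_state {
        int link;    /* 0 = down, i.e. PHY_DOWN in the driver */
        int speed;
        int duplex;
    };

    static bool adjust_link(struct link_state *cached,
                            const struct link_state *phy)
    {
        bool new_state = false;

        if (phy->link) {
            if (phy->duplex != cached->duplex) {
                new_state = true;
                cached->duplex = phy->duplex;
            }
            if (phy->speed != cached->speed) {
                new_state = true;
                cached->speed = phy->speed;
            }
            if (!cached->link) {
                new_state = true;
                cached->link = phy->link;
            }
        } else if (cached->link) {
            new_state = true;
            cached->link = 0;
            cached->speed = 0;
            cached->duplex = -1;
        }
        return new_state;
    }

    int main(void)
    {
        struct link_state cached = { 0, 0, -1 };
        struct link_state phy = { 1, 100, 1 };

        printf("changed: %d\n", adjust_link(&cached, &phy)); /* 1 */
        printf("changed: %d\n", adjust_link(&cached, &phy)); /* 0 */
        return 0;
    }
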
diff --git a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
index 50617c5a0bdb..0c74a702d461 100644
--- a/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
+++ b/trunk/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
@@ -149,7 +149,6 @@ void dwmac_mmc_intr_all_mask(void __iomem *ioaddr)
{
writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_INTR_MASK);
writel(MMC_DEFAULT_MASK, ioaddr + MMC_TX_INTR_MASK);
- writel(MMC_DEFAULT_MASK, ioaddr + MMC_RX_IPC_INTR_MASK);
}
/* This reads the MAC core counters (if actually supported).
diff --git a/trunk/drivers/net/ethernet/ti/cpsw.c b/trunk/drivers/net/ethernet/ti/cpsw.c
index 4781d3d8e182..df32a090d08e 100644
--- a/trunk/drivers/net/ethernet/ti/cpsw.c
+++ b/trunk/drivers/net/ethernet/ti/cpsw.c
@@ -436,7 +436,7 @@ void cpsw_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
- netif_wake_queue(ndev);
+ netif_start_queue(ndev);
cpts_tx_timestamp(priv->cpts, skb);
priv->stats.tx_packets++;
priv->stats.tx_bytes += len;
@@ -1380,7 +1380,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
if (data->dual_emac) {
- if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
+ if (of_property_read_u32(node, "dual_emac_res_vlan",
&prop)) {
pr_err("Missing dual_emac_res_vlan in DT.\n");
slave_data->dual_emac_res_vlan = i+1;
diff --git a/trunk/drivers/net/ethernet/ti/davinci_emac.c b/trunk/drivers/net/ethernet/ti/davinci_emac.c
index 72300bc9e378..ae1b77aa199f 100644
--- a/trunk/drivers/net/ethernet/ti/davinci_emac.c
+++ b/trunk/drivers/net/ethernet/ti/davinci_emac.c
@@ -1053,7 +1053,7 @@ static void emac_tx_handler(void *token, int len, int status)
* queue is stopped then start the queue as we have free desc for tx
*/
if (unlikely(netif_queue_stopped(ndev)))
- netif_wake_queue(ndev);
+ netif_start_queue(ndev);
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += len;
dev_kfree_skb_any(skb);
diff --git a/trunk/drivers/net/hyperv/netvsc.c b/trunk/drivers/net/hyperv/netvsc.c
index f5f0f09e4cc5..1cd77483da50 100644
--- a/trunk/drivers/net/hyperv/netvsc.c
+++ b/trunk/drivers/net/hyperv/netvsc.c
@@ -470,10 +470,8 @@ static void netvsc_send_completion(struct hv_device *device,
packet->trans_id;
/* Notify the layer above us */
- if (nvsc_packet)
- nvsc_packet->completion.send.send_completion(
- nvsc_packet->completion.send.
- send_completion_ctx);
+ nvsc_packet->completion.send.send_completion(
+ nvsc_packet->completion.send.send_completion_ctx);
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
@@ -500,7 +498,6 @@ int netvsc_send(struct hv_device *device,
int ret = 0;
struct nvsp_message sendMessage;
struct net_device *ndev;
- u64 req_id;
net_device = get_outbound_net_device(device);
if (!net_device)
@@ -521,24 +518,20 @@ int netvsc_send(struct hv_device *device,
0xFFFFFFFF;
sendMessage.msg.v1_msg.send_rndis_pkt.send_buf_section_size = 0;
- if (packet->completion.send.send_completion)
- req_id = (u64)packet;
- else
- req_id = 0;
-
if (packet->page_buf_cnt) {
ret = vmbus_sendpacket_pagebuffer(device->channel,
packet->page_buf,
packet->page_buf_cnt,
&sendMessage,
sizeof(struct nvsp_message),
- req_id);
+ (unsigned long)packet);
} else {
ret = vmbus_sendpacket(device->channel, &sendMessage,
sizeof(struct nvsp_message),
- req_id,
+ (unsigned long)packet,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
+
}
if (ret == 0) {
diff --git a/trunk/drivers/net/hyperv/netvsc_drv.c b/trunk/drivers/net/hyperv/netvsc_drv.c
index 8341b62e5521..5f85205cd12b 100644
--- a/trunk/drivers/net/hyperv/netvsc_drv.c
+++ b/trunk/drivers/net/hyperv/netvsc_drv.c
@@ -241,11 +241,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
if (status == 1) {
netif_carrier_on(net);
+ netif_wake_queue(net);
ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
netif_carrier_off(net);
+ netif_tx_disable(net);
}
}
diff --git a/trunk/drivers/net/hyperv/rndis_filter.c b/trunk/drivers/net/hyperv/rndis_filter.c
index 0775f0aefd1e..2b657d4d63a8 100644
--- a/trunk/drivers/net/hyperv/rndis_filter.c
+++ b/trunk/drivers/net/hyperv/rndis_filter.c
@@ -61,6 +61,9 @@ struct rndis_request {
static void rndis_filter_send_completion(void *ctx);
+static void rndis_filter_send_request_completion(void *ctx);
+
+
static struct rndis_device *get_rndis_device(void)
{
@@ -238,7 +241,10 @@ static int rndis_filter_send_request(struct rndis_device *dev,
packet->page_buf[0].len;
}
- packet->completion.send.send_completion = NULL;
+ packet->completion.send.send_completion_ctx = req;/* packet; */
+ packet->completion.send.send_completion =
+ rndis_filter_send_request_completion;
+ packet->completion.send.send_completion_tid = (unsigned long)dev;
ret = netvsc_send(dev->net_dev->dev, packet);
return ret;
@@ -993,3 +999,9 @@ static void rndis_filter_send_completion(void *ctx)
/* Pass it back to the original handler */
filter_pkt->completion(filter_pkt->completion_ctx);
}
+
+
+static void rndis_filter_send_request_completion(void *ctx)
+{
+ /* Noop */
+}
diff --git a/trunk/drivers/net/tun.c b/trunk/drivers/net/tun.c
index 729ed533bb33..b7c457adc0dc 100644
--- a/trunk/drivers/net/tun.c
+++ b/trunk/drivers/net/tun.c
@@ -1594,7 +1594,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
if (tun->flags & TUN_TAP_MQ &&
(tun->numqueues + tun->numdisabled > 1))
- return -EBUSY;
+ return err;
}
else {
char *name;
diff --git a/trunk/drivers/net/usb/cdc_mbim.c b/trunk/drivers/net/usb/cdc_mbim.c
index 6bd91676d2cb..16c842997291 100644
--- a/trunk/drivers/net/usb/cdc_mbim.c
+++ b/trunk/drivers/net/usb/cdc_mbim.c
@@ -134,7 +134,7 @@ static struct sk_buff *cdc_mbim_tx_fixup(struct usbnet *dev, struct sk_buff *skb
goto error;
if (skb) {
- if (skb->len <= ETH_HLEN)
+ if (skb->len <= sizeof(ETH_HLEN))
goto error;
/* mapping VLANs to MBIM sessions:
diff --git a/trunk/drivers/net/usb/qmi_wwan.c b/trunk/drivers/net/usb/qmi_wwan.c
index 2a3579f67910..968d5d50751d 100644
--- a/trunk/drivers/net/usb/qmi_wwan.c
+++ b/trunk/drivers/net/usb/qmi_wwan.c
@@ -13,7 +13,6 @@
#include
#include
#include
-#include
#include
#include
#include
@@ -53,96 +52,6 @@ struct qmi_wwan_state {
struct usb_interface *data;
};
-/* default ethernet address used by the modem */
-static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
-
-/* Make up an ethernet header if the packet doesn't have one.
- *
- * A firmware bug common among several devices cause them to send raw
- * IP packets under some circumstances. There is no way for the
- * driver/host to know when this will happen. And even when the bug
- * hits, some packets will still arrive with an intact header.
- *
- * The supported devices are only capably of sending IPv4, IPv6 and
- * ARP packets on a point-to-point link. Any packet with an ethernet
- * header will have either our address or a broadcast/multicast
- * address as destination. ARP packets will always have a header.
- *
- * This means that this function will reliably add the appropriate
- * header iff necessary, provided our hardware address does not start
- * with 4 or 6.
- *
- * Another common firmware bug results in all packets being addressed
- * to 00:a0:c6:00:00:00 despite the host address being different.
- * This function will also fixup such packets.
- */
-static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
-{
- __be16 proto;
-
- /* usbnet rx_complete guarantees that skb->len is at least
- * hard_header_len, so we can inspect the dest address without
- * checking skb->len
- */
- switch (skb->data[0] & 0xf0) {
- case 0x40:
- proto = htons(ETH_P_IP);
- break;
- case 0x60:
- proto = htons(ETH_P_IPV6);
- break;
- case 0x00:
- if (is_multicast_ether_addr(skb->data))
- return 1;
- /* possibly bogus destination - rewrite just in case */
- skb_reset_mac_header(skb);
- goto fix_dest;
- default:
- /* pass along other packets without modifications */
- return 1;
- }
- if (skb_headroom(skb) < ETH_HLEN)
- return 0;
- skb_push(skb, ETH_HLEN);
- skb_reset_mac_header(skb);
- eth_hdr(skb)->h_proto = proto;
- memset(eth_hdr(skb)->h_source, 0, ETH_ALEN);
-fix_dest:
- memcpy(eth_hdr(skb)->h_dest, dev->net->dev_addr, ETH_ALEN);
- return 1;
-}
-
-/* very simplistic detection of IPv4 or IPv6 headers */
-static bool possibly_iphdr(const char *data)
-{
- return (data[0] & 0xd0) == 0x40;
-}
-
-/* disallow addresses which may be confused with IP headers */
-static int qmi_wwan_mac_addr(struct net_device *dev, void *p)
-{
- int ret;
- struct sockaddr *addr = p;
-
- ret = eth_prepare_mac_addr_change(dev, p);
- if (ret < 0)
- return ret;
- if (possibly_iphdr(addr->sa_data))
- return -EADDRNOTAVAIL;
- eth_commit_mac_addr_change(dev, p);
- return 0;
-}
-
-static const struct net_device_ops qmi_wwan_netdev_ops = {
- .ndo_open = usbnet_open,
- .ndo_stop = usbnet_stop,
- .ndo_start_xmit = usbnet_start_xmit,
- .ndo_tx_timeout = usbnet_tx_timeout,
- .ndo_change_mtu = usbnet_change_mtu,
- .ndo_set_mac_address = qmi_wwan_mac_addr,
- .ndo_validate_addr = eth_validate_addr,
-};
-
/* using a counter to merge subdriver requests with our own into a combined state */
static int qmi_wwan_manage_power(struct usbnet *dev, int on)
{
@@ -320,18 +229,6 @@ static int qmi_wwan_bind(struct usbnet *dev, struct usb_interface *intf)
usb_driver_release_interface(driver, info->data);
}
- /* Never use the same address on both ends of the link, even
- * if the buggy firmware told us to.
- */
- if (!compare_ether_addr(dev->net->dev_addr, default_modem_addr))
- eth_hw_addr_random(dev->net);
-
- /* make MAC addr easily distinguishable from an IP header */
- if (possibly_iphdr(dev->net->dev_addr)) {
- dev->net->dev_addr[0] |= 0x02; /* set local assignment bit */
- dev->net->dev_addr[0] &= 0xbf; /* clear "IP" bit */
- }
- dev->net->netdev_ops = &qmi_wwan_netdev_ops;
err:
return status;
}
@@ -410,7 +307,6 @@ static const struct driver_info qmi_wwan_info = {
.bind = qmi_wwan_bind,
.unbind = qmi_wwan_unbind,
.manage_power = qmi_wwan_manage_power,
- .rx_fixup = qmi_wwan_rx_fixup,
};
#define HUAWEI_VENDOR_ID 0x12D1
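
The removed qmi_wwan_rx_fixup() above keys off the IP version nibble: a first byte of 0x4x marks a raw IPv4 packet, 0x6x a raw IPv6 packet, and anything else is assumed to already carry an Ethernet header (possibly_iphdr()'s (data[0] & 0xd0) == 0x40 test matches exactly those two nibbles). A small self-contained sketch of that classification, with classify() as an invented name:

    #include <stdio.h>

    static const char *classify(unsigned char first_byte)
    {
        switch (first_byte & 0xf0) {
        case 0x40:
            return "raw IPv4";
        case 0x60:
            return "raw IPv6";
        default:
            return "not a raw IP packet";
        }
    }

    int main(void)
    {
        unsigned char samples[] = { 0x45, 0x60, 0x02 };

        for (int i = 0; i < 3; i++)
            printf("0x%02x -> %s\n", samples[i], classify(samples[i]));
        return 0;
    }
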
diff --git a/trunk/drivers/net/usb/smsc75xx.c b/trunk/drivers/net/usb/smsc75xx.c
index 1a15ec14c386..9abe51710f22 100644
--- a/trunk/drivers/net/usb/smsc75xx.c
+++ b/trunk/drivers/net/usb/smsc75xx.c
@@ -914,12 +914,8 @@ static int smsc75xx_set_rx_max_frame_length(struct usbnet *dev, int size)
static int smsc75xx_change_mtu(struct net_device *netdev, int new_mtu)
{
struct usbnet *dev = netdev_priv(netdev);
- int ret;
-
- if (new_mtu > MAX_SINGLE_PACKET_SIZE)
- return -EINVAL;
- ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+ int ret = smsc75xx_set_rx_max_frame_length(dev, new_mtu);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set mac rx frame length\n");
return ret;
@@ -1328,7 +1324,7 @@ static int smsc75xx_reset(struct usbnet *dev)
netif_dbg(dev, ifup, dev->net, "FCT_TX_CTL set to 0x%08x\n", buf);
- ret = smsc75xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+ ret = smsc75xx_set_rx_max_frame_length(dev, 1514);
if (ret < 0) {
netdev_warn(dev->net, "Failed to set max rx frame length\n");
return ret;
@@ -2138,8 +2134,8 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
else if (rx_cmd_a & (RX_CMD_A_LONG | RX_CMD_A_RUNT))
dev->net->stats.rx_frame_errors++;
} else {
- /* MAX_SINGLE_PACKET_SIZE + 4(CRC) + 2(COE) + 4(Vlan) */
- if (unlikely(size > (MAX_SINGLE_PACKET_SIZE + ETH_HLEN + 12))) {
+ /* ETH_FRAME_LEN + 4(CRC) + 2(COE) + 4(Vlan) */
+ if (unlikely(size > (ETH_FRAME_LEN + 12))) {
netif_dbg(dev, rx_err, dev->net,
"size err rx_cmd_a=0x%08x\n",
rx_cmd_a);
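
For reference on the frame-length limits juggled above: the wire frame is the MTU plus the 14-byte Ethernet header (ETH_HLEN), and the FCS and an optional 802.1Q tag add 4 bytes each, which is where figures such as 1514 (ETH_FRAME_LEN) and the extra CRC/VLAN slack in the rx_fixup bound come from. A quick arithmetic check using only the standard Ethernet sizes:

    #include <stdio.h>

    #define ETH_HLEN  14   /* destination + source MAC + ethertype */
    #define ETH_FCS    4   /* frame check sequence */
    #define VLAN_HLEN  4   /* optional 802.1Q tag */

    int main(void)
    {
        int mtu = 1500;

        printf("frame length       : %d\n", mtu + ETH_HLEN);            /* 1514 */
        printf("+ FCS              : %d\n", mtu + ETH_HLEN + ETH_FCS);  /* 1518 */
        printf("+ FCS and VLAN tag : %d\n",
               mtu + ETH_HLEN + ETH_FCS + VLAN_HLEN);                   /* 1522 */
        return 0;
    }
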
diff --git a/trunk/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h b/trunk/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
index bdee2ed67219..28fd99203f64 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
+++ b/trunk/drivers/net/wireless/ath/ath9k/ar9580_1p0_initvals.h
@@ -519,7 +519,7 @@ static const u32 ar9580_1p0_mac_core[][2] = {
{0x00008258, 0x00000000},
{0x0000825c, 0x40000000},
{0x00008260, 0x00080922},
- {0x00008264, 0x9d400010},
+ {0x00008264, 0x9bc00010},
{0x00008268, 0xffffffff},
{0x0000826c, 0x0000ffff},
{0x00008270, 0x00000000},
diff --git a/trunk/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c b/trunk/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
index 73fe8d6db566..467b60014b7b 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/dfs_pattern_detector.c
@@ -143,14 +143,14 @@ channel_detector_create(struct dfs_pattern_detector *dpd, u16 freq)
u32 sz, i;
struct channel_detector *cd;
- cd = kmalloc(sizeof(*cd), GFP_ATOMIC);
+ cd = kmalloc(sizeof(*cd), GFP_KERNEL);
if (cd == NULL)
goto fail;
INIT_LIST_HEAD(&cd->head);
cd->freq = freq;
sz = sizeof(cd->detectors) * dpd->num_radar_types;
- cd->detectors = kzalloc(sz, GFP_ATOMIC);
+ cd->detectors = kzalloc(sz, GFP_KERNEL);
if (cd->detectors == NULL)
goto fail;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c b/trunk/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
index 5e48c5515b8c..91b8dceeadb1 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/dfs_pri_detector.c
@@ -218,7 +218,7 @@ static bool pulse_queue_enqueue(struct pri_detector *pde, u64 ts)
{
struct pulse_elem *p = pool_get_pulse_elem();
if (p == NULL) {
- p = kmalloc(sizeof(*p), GFP_ATOMIC);
+ p = kmalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
DFS_POOL_STAT_INC(pulse_alloc_error);
return false;
@@ -299,7 +299,7 @@ static bool pseq_handler_create_sequences(struct pri_detector *pde,
ps.deadline_ts = ps.first_ts + ps.dur;
new_ps = pool_get_pseq_elem();
if (new_ps == NULL) {
- new_ps = kmalloc(sizeof(*new_ps), GFP_ATOMIC);
+ new_ps = kmalloc(sizeof(*new_ps), GFP_KERNEL);
if (new_ps == NULL) {
DFS_POOL_STAT_INC(pseq_alloc_error);
return false;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index a47f5e05fc04..716058b67557 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -796,7 +796,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv)
* required version.
*/
if (priv->fw_version_major != MAJOR_VERSION_REQ ||
- priv->fw_version_minor < MINOR_VERSION_REQ) {
+ priv->fw_version_minor != MINOR_VERSION_REQ) {
dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n",
MAJOR_VERSION_REQ, MINOR_VERSION_REQ);
return -EINVAL;
diff --git a/trunk/drivers/net/wireless/ath/ath9k/link.c b/trunk/drivers/net/wireless/ath/ath9k/link.c
index 7fdac6c7b3ea..39c84ecf6a42 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/link.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/link.c
@@ -170,8 +170,7 @@ void ath_rx_poll(unsigned long data)
{
struct ath_softc *sc = (struct ath_softc *)data;
- if (!test_bit(SC_OP_INVALID, &sc->sc_flags))
- ieee80211_queue_work(sc->hw, &sc->hw_check_work);
+ ieee80211_queue_work(sc->hw, &sc->hw_check_work);
}
/*
diff --git a/trunk/drivers/net/wireless/ath/ath9k/main.c b/trunk/drivers/net/wireless/ath/ath9k/main.c
index 988372d218a4..6e66f9c6782b 100644
--- a/trunk/drivers/net/wireless/ath/ath9k/main.c
+++ b/trunk/drivers/net/wireless/ath/ath9k/main.c
@@ -280,10 +280,6 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
if (r) {
ath_err(common,
"Unable to reset channel, reset status %d\n", r);
-
- ath9k_hw_enable_interrupts(ah);
- ath9k_queue_reset(sc, RESET_TYPE_BB_HANG);
-
goto out;
}
diff --git a/trunk/drivers/net/wireless/b43/dma.c b/trunk/drivers/net/wireless/b43/dma.c
index 122146943bf2..38bc5a7997ff 100644
--- a/trunk/drivers/net/wireless/b43/dma.c
+++ b/trunk/drivers/net/wireless/b43/dma.c
@@ -1487,12 +1487,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
const struct b43_dma_ops *ops;
struct b43_dmaring *ring;
struct b43_dmadesc_meta *meta;
- static const struct b43_txstatus fake; /* filled with 0 */
- const struct b43_txstatus *txstat;
int slot, firstused;
bool frame_succeed;
- int skip;
- static u8 err_out1, err_out2;
ring = parse_cookie(dev, status->cookie, &slot);
if (unlikely(!ring))
@@ -1505,36 +1501,13 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
firstused = ring->current_slot - ring->used_slots + 1;
if (firstused < 0)
firstused = ring->nr_slots + firstused;
-
- skip = 0;
if (unlikely(slot != firstused)) {
/* This possibly is a firmware bug and will result in
- * malfunction, memory leaks and/or stall of DMA functionality.
- */
- if (slot == next_slot(ring, next_slot(ring, firstused))) {
- /* If a single header/data pair was missed, skip over
- * the first two slots in an attempt to recover.
- */
- slot = firstused;
- skip = 2;
- if (!err_out1) {
- /* Report the error once. */
- b43dbg(dev->wl,
- "Skip on DMA ring %d slot %d.\n",
- ring->index, slot);
- err_out1 = 1;
- }
- } else {
- /* More than a single header/data pair were missed.
- * Report this error once.
- */
- if (!err_out2)
- b43dbg(dev->wl,
- "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
- ring->index, firstused, slot);
- err_out2 = 1;
- return;
- }
+ * malfunction, memory leaks and/or stall of DMA functionality. */
+ b43dbg(dev->wl, "Out of order TX status report on DMA ring %d. "
+ "Expected %d, but got %d\n",
+ ring->index, firstused, slot);
+ return;
}
ops = ring->ops;
@@ -1549,13 +1522,11 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
slot, firstused, ring->index);
break;
}
-
if (meta->skb) {
struct b43_private_tx_info *priv_info =
- b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
+ b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));
- unmap_descbuffer(ring, meta->dmaaddr,
- meta->skb->len, 1);
+ unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
kfree(priv_info->bouncebuffer);
priv_info->bouncebuffer = NULL;
} else {
@@ -1567,9 +1538,8 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
struct ieee80211_tx_info *info;
if (unlikely(!meta->skb)) {
- /* This is a scatter-gather fragment of a frame,
- * so the skb pointer must not be NULL.
- */
+ /* This is a scatter-gather fragment of a frame, so
+ * the skb pointer must not be NULL. */
b43dbg(dev->wl, "TX status unexpected NULL skb "
"at slot %d (first=%d) on ring %d\n",
slot, firstused, ring->index);
@@ -1580,18 +1550,9 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
/*
* Call back to inform the ieee80211 subsystem about
- * the status of the transmission. When skipping over
- * a missed TX status report, use a status structure
- * filled with zeros to indicate that the frame was not
- * sent (frame_count 0) and not acknowledged
+ * the status of the transmission.
*/
- if (unlikely(skip))
- txstat = &fake;
- else
- txstat = status;
-
- frame_succeed = b43_fill_txstatus_report(dev, info,
- txstat);
+ frame_succeed = b43_fill_txstatus_report(dev, info, status);
#ifdef CONFIG_B43_DEBUG
if (frame_succeed)
ring->nr_succeed_tx_packets++;
@@ -1619,14 +1580,12 @@ void b43_dma_handle_txstatus(struct b43_wldev *dev,
/* Everything unmapped and free'd. So it's not used anymore. */
ring->used_slots--;
- if (meta->is_last_fragment && !skip) {
+ if (meta->is_last_fragment) {
/* This is the last scatter-gather
* fragment of the frame. We are done. */
break;
}
slot = next_slot(ring, slot);
- if (skip > 0)
- --skip;
}
if (ring->stopped) {
B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
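
Both the removed skip-recovery logic and the reinstated error path above lean on the same "first used slot" arithmetic: the oldest outstanding descriptor sits used_slots-1 entries behind current_slot, wrapping around the ring when the subtraction goes negative. A tiny standalone check of that computation (first_used_slot() is an invented name; the arithmetic matches the hunk):

    #include <stdio.h>

    static int first_used_slot(int current_slot, int used_slots, int nr_slots)
    {
        int firstused = current_slot - used_slots + 1;

        if (firstused < 0)
            firstused += nr_slots;
        return firstused;
    }

    int main(void)
    {
        printf("%d\n", first_used_slot(10, 4, 64)); /* 7  */
        printf("%d\n", first_used_slot(1, 4, 64));  /* 62 */
        return 0;
    }
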
diff --git a/trunk/drivers/net/wireless/b43/phy_n.c b/trunk/drivers/net/wireless/b43/phy_n.c
index b70f220bc4b3..3c35382ee6c2 100644
--- a/trunk/drivers/net/wireless/b43/phy_n.c
+++ b/trunk/drivers/net/wireless/b43/phy_n.c
@@ -1564,7 +1564,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
u16 clip_off[2] = { 0xFFFF, 0xFFFF };
u8 vcm_final = 0;
- s32 offset[4];
+ s8 offset[4];
s32 results[8][4] = { };
s32 results_min[4] = { };
s32 poll_results[4] = { };
@@ -1615,7 +1615,7 @@ static void b43_nphy_rev3_rssi_cal(struct b43_wldev *dev)
}
for (i = 0; i < 4; i += 2) {
s32 curr;
- s32 mind = 0x100000;
+ s32 mind = 40;
s32 minpoll = 249;
u8 minvcm = 0;
if (2 * core != i)
@@ -1732,7 +1732,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
u8 regs_save_radio[2];
u16 regs_save_phy[2];
- s32 offset[4];
+ s8 offset[4];
u8 core;
u8 rail;
@@ -1799,7 +1799,7 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
}
for (i = 0; i < 4; i++) {
- s32 mind = 0x100000;
+ s32 mind = 40;
u8 minvcm = 0;
s32 minpoll = 249;
s32 curr;
@@ -5165,8 +5165,7 @@ static void b43_nphy_pmu_spur_avoid(struct b43_wldev *dev, bool avoid)
#endif
#ifdef CONFIG_B43_SSB
case B43_BUS_SSB:
- ssb_pmu_spuravoid_pllupdate(&dev->dev->sdev->bus->chipco,
- avoid);
+ /* FIXME */
break;
#endif
}
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 35fc68be158d..4469321c0eb3 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -3317,15 +3317,15 @@ static int _brcmf_sdbrcm_download_firmware(struct brcmf_sdio *bus)
goto err;
}
+ /* External image takes precedence if specified */
if (brcmf_sdbrcm_download_code_file(bus)) {
brcmf_err("dongle image file download failed\n");
goto err;
}
- if (brcmf_sdbrcm_download_nvram(bus)) {
+ /* External nvram takes precedence if specified */
+ if (brcmf_sdbrcm_download_nvram(bus))
brcmf_err("dongle nvram file download failed\n");
- goto err;
- }
/* Take arm out of reset */
if (brcmf_sdbrcm_download_state(bus, false)) {
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 78da3eff75e8..2af9c0f0798d 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -1891,10 +1891,8 @@ static s32
brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
u8 key_idx, const u8 *mac_addr, struct key_params *params)
{
- struct brcmf_if *ifp = netdev_priv(ndev);
struct brcmf_wsec_key key;
s32 err = 0;
- u8 keybuf[8];
memset(&key, 0, sizeof(key));
key.index = (u32) key_idx;
@@ -1918,9 +1916,8 @@ brcmf_add_keyext(struct wiphy *wiphy, struct net_device *ndev,
brcmf_dbg(CONN, "Setting the key index %d\n", key.index);
memcpy(key.data, params->key, key.len);
- if ((ifp->vif->mode != WL_MODE_AP) &&
- (params->cipher == WLAN_CIPHER_SUITE_TKIP)) {
- brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+ if (params->cipher == WLAN_CIPHER_SUITE_TKIP) {
+ u8 keybuf[8];
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2016,7 +2013,7 @@ brcmf_cfg80211_add_key(struct wiphy *wiphy, struct net_device *ndev,
break;
case WLAN_CIPHER_SUITE_TKIP:
if (ifp->vif->mode != WL_MODE_AP) {
- brcmf_dbg(CONN, "Swapping RX/TX MIC key\n");
+ brcmf_dbg(CONN, "Swapping key\n");
memcpy(keybuf, &key.data[24], sizeof(keybuf));
memcpy(&key.data[24], &key.data[16], sizeof(keybuf));
memcpy(&key.data[16], keybuf, sizeof(keybuf));
@@ -2121,7 +2118,8 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
err = -EAGAIN;
goto done;
}
- if (wsec & WEP_ENABLED) {
+ switch (wsec & ~SES_OW_ENABLED) {
+ case WEP_ENABLED:
sec = &profile->sec;
if (sec->cipher_pairwise & WLAN_CIPHER_SUITE_WEP40) {
params.cipher = WLAN_CIPHER_SUITE_WEP40;
@@ -2130,13 +2128,16 @@ brcmf_cfg80211_get_key(struct wiphy *wiphy, struct net_device *ndev,
params.cipher = WLAN_CIPHER_SUITE_WEP104;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_WEP104\n");
}
- } else if (wsec & TKIP_ENABLED) {
+ break;
+ case TKIP_ENABLED:
params.cipher = WLAN_CIPHER_SUITE_TKIP;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_TKIP\n");
- } else if (wsec & AES_ENABLED) {
+ break;
+ case AES_ENABLED:
params.cipher = WLAN_CIPHER_SUITE_AES_CMAC;
brcmf_dbg(CONN, "WLAN_CIPHER_SUITE_AES_CMAC\n");
- } else {
+ break;
+ default:
brcmf_err("Invalid algo (0x%x)\n", wsec);
err = -EINVAL;
goto done;
@@ -3823,9 +3824,8 @@ brcmf_cfg80211_start_ap(struct wiphy *wiphy, struct net_device *ndev,
static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
{
struct brcmf_if *ifp = netdev_priv(ndev);
- s32 err;
+ s32 err = -EPERM;
struct brcmf_fil_bss_enable_le bss_enable;
- struct brcmf_join_params join_params;
brcmf_dbg(TRACE, "Enter\n");
@@ -3833,21 +3833,16 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
/* Due to most likely deauths outstanding we sleep */
/* first to make sure they get processed by fw. */
msleep(400);
-
- memset(&join_params, 0, sizeof(join_params));
- err = brcmf_fil_cmd_data_set(ifp, BRCMF_C_SET_SSID,
- &join_params, sizeof(join_params));
- if (err < 0)
- brcmf_err("SET SSID error (%d)\n", err);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
- if (err < 0)
- brcmf_err("BRCMF_C_UP error %d\n", err);
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_AP, 0);
- if (err < 0)
+ if (err < 0) {
brcmf_err("setting AP mode failed %d\n", err);
- err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_INFRA, 0);
- if (err < 0)
- brcmf_err("setting INFRA mode failed %d\n", err);
+ goto exit;
+ }
+ err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_UP, 0);
+ if (err < 0) {
+ brcmf_err("BRCMF_C_UP error %d\n", err);
+ goto exit;
+ }
} else {
bss_enable.bsscfg_idx = cpu_to_le32(ifp->bssidx);
bss_enable.enable = cpu_to_le32(0);
@@ -3860,6 +3855,7 @@ static int brcmf_cfg80211_stop_ap(struct wiphy *wiphy, struct net_device *ndev)
set_bit(BRCMF_VIF_STATUS_AP_CREATING, &ifp->vif->sme_state);
clear_bit(BRCMF_VIF_STATUS_AP_CREATED, &ifp->vif->sme_state);
+exit:
return err;
}
@@ -4126,6 +4122,10 @@ static const struct ieee80211_iface_limit brcmf_iface_limits[] = {
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP)
},
+ {
+ .max = 1,
+ .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
+ },
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_CLIENT) |
@@ -4183,7 +4183,8 @@ static struct wiphy *brcmf_setup_wiphy(struct device *phydev)
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
- BIT(NL80211_IFTYPE_P2P_GO);
+ BIT(NL80211_IFTYPE_P2P_GO) |
+ BIT(NL80211_IFTYPE_P2P_DEVICE);
wiphy->iface_combinations = brcmf_iface_combos;
wiphy->n_iface_combinations = ARRAY_SIZE(brcmf_iface_combos);
wiphy->bands[IEEE80211_BAND_2GHZ] = &__wl_band_2ghz;
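
The TKIP branches touched above swap the TX and RX MIC halves of the 32-byte key material (bytes 16..23 and 24..31) through an 8-byte scratch buffer before handing the key to firmware. A standalone sketch of just that byte shuffle (swap_mic_keys() is an invented name; the byte ranges match the hunk):

    #include <stdio.h>
    #include <string.h>

    static void swap_mic_keys(unsigned char key[32])
    {
        unsigned char keybuf[8];

        memcpy(keybuf, &key[24], sizeof(keybuf));
        memcpy(&key[24], &key[16], sizeof(keybuf));
        memcpy(&key[16], keybuf, sizeof(keybuf));
    }

    int main(void)
    {
        unsigned char key[32];
        int i;

        for (i = 0; i < 32; i++)
            key[i] = (unsigned char)i;
        swap_mic_keys(key);
        for (i = 16; i < 32; i++)
            printf("%02x ", key[i]);  /* prints 18..1f then 10..17 */
        printf("\n");
        return 0;
    }
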
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
index e2340b231aa1..c6451c61407a 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
@@ -274,130 +274,6 @@ static void brcms_set_basic_rate(struct brcm_rateset *rs, u16 rate, bool is_br)
}
}
-/**
- * This function frees the WL per-device resources.
- *
- * This function frees resources owned by the WL device pointed to
- * by the wl parameter.
- *
- * precondition: can both be called locked and unlocked
- *
- */
-static void brcms_free(struct brcms_info *wl)
-{
- struct brcms_timer *t, *next;
-
- /* free ucode data */
- if (wl->fw.fw_cnt)
- brcms_ucode_data_free(&wl->ucode);
- if (wl->irq)
- free_irq(wl->irq, wl);
-
- /* kill dpc */
- tasklet_kill(&wl->tasklet);
-
- if (wl->pub) {
- brcms_debugfs_detach(wl->pub);
- brcms_c_module_unregister(wl->pub, "linux", wl);
- }
-
- /* free common resources */
- if (wl->wlc) {
- brcms_c_detach(wl->wlc);
- wl->wlc = NULL;
- wl->pub = NULL;
- }
-
- /* virtual interface deletion is deferred so we cannot spinwait */
-
- /* wait for all pending callbacks to complete */
- while (atomic_read(&wl->callbacks) > 0)
- schedule();
-
- /* free timers */
- for (t = wl->timers; t; t = next) {
- next = t->next;
-#ifdef DEBUG
- kfree(t->name);
-#endif
- kfree(t);
- }
-}
-
-/*
-* called from both kernel as from this kernel module (error flow on attach)
-* precondition: perimeter lock is not acquired.
-*/
-static void brcms_remove(struct bcma_device *pdev)
-{
- struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
- struct brcms_info *wl = hw->priv;
-
- if (wl->wlc) {
- wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
- wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- ieee80211_unregister_hw(hw);
- }
-
- brcms_free(wl);
-
- bcma_set_drvdata(pdev, NULL);
- ieee80211_free_hw(hw);
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static void brcms_release_fw(struct brcms_info *wl)
-{
- int i;
- for (i = 0; i < MAX_FW_IMAGES; i++) {
- release_firmware(wl->fw.fw_bin[i]);
- release_firmware(wl->fw.fw_hdr[i]);
- }
-}
-
-/*
- * Precondition: Since this function is called in brcms_pci_probe() context,
- * no locking is required.
- */
-static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
-{
- int status;
- struct device *device = &pdev->dev;
- char fw_name[100];
- int i;
-
- memset(&wl->fw, 0, sizeof(struct brcms_firmware));
- for (i = 0; i < MAX_FW_IMAGES; i++) {
- if (brcms_firmwares[i] == NULL)
- break;
- sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
- UCODE_LOADER_API_VER);
- status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
- if (status) {
- wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
- KBUILD_MODNAME, fw_name);
- return status;
- }
- sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
- UCODE_LOADER_API_VER);
- status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
- if (status) {
- wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
- KBUILD_MODNAME, fw_name);
- return status;
- }
- wl->fw.hdr_num_entries[i] =
- wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
- }
- wl->fw.fw_cnt = i;
- status = brcms_ucode_data_init(wl, &wl->ucode);
- brcms_release_fw(wl);
- return status;
-}
-
static void brcms_ops_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
@@ -430,14 +306,6 @@ static int brcms_ops_start(struct ieee80211_hw *hw)
if (!blocked)
wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
- if (!wl->ucode.bcm43xx_bomminor) {
- err = brcms_request_fw(wl, wl->wlc->hw->d11core);
- if (err) {
- brcms_remove(wl->wlc->hw->d11core);
- return -ENOENT;
- }
- }
-
spin_lock_bh(&wl->lock);
/* avoid acknowledging frames before a non-monitor device is added */
wl->mute_tx = true;
@@ -925,6 +793,128 @@ void brcms_dpc(unsigned long data)
wake_up(&wl->tx_flush_wq);
}
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static int brcms_request_fw(struct brcms_info *wl, struct bcma_device *pdev)
+{
+ int status;
+ struct device *device = &pdev->dev;
+ char fw_name[100];
+ int i;
+
+ memset(&wl->fw, 0, sizeof(struct brcms_firmware));
+ for (i = 0; i < MAX_FW_IMAGES; i++) {
+ if (brcms_firmwares[i] == NULL)
+ break;
+ sprintf(fw_name, "%s-%d.fw", brcms_firmwares[i],
+ UCODE_LOADER_API_VER);
+ status = request_firmware(&wl->fw.fw_bin[i], fw_name, device);
+ if (status) {
+ wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+ KBUILD_MODNAME, fw_name);
+ return status;
+ }
+ sprintf(fw_name, "%s_hdr-%d.fw", brcms_firmwares[i],
+ UCODE_LOADER_API_VER);
+ status = request_firmware(&wl->fw.fw_hdr[i], fw_name, device);
+ if (status) {
+ wiphy_err(wl->wiphy, "%s: fail to load firmware %s\n",
+ KBUILD_MODNAME, fw_name);
+ return status;
+ }
+ wl->fw.hdr_num_entries[i] =
+ wl->fw.fw_hdr[i]->size / (sizeof(struct firmware_hdr));
+ }
+ wl->fw.fw_cnt = i;
+ return brcms_ucode_data_init(wl, &wl->ucode);
+}
+
+/*
+ * Precondition: Since this function is called in brcms_pci_probe() context,
+ * no locking is required.
+ */
+static void brcms_release_fw(struct brcms_info *wl)
+{
+ int i;
+ for (i = 0; i < MAX_FW_IMAGES; i++) {
+ release_firmware(wl->fw.fw_bin[i]);
+ release_firmware(wl->fw.fw_hdr[i]);
+ }
+}
+
+/**
+ * This function frees the WL per-device resources.
+ *
+ * This function frees resources owned by the WL device pointed to
+ * by the wl parameter.
+ *
+ * precondition: can be called both locked and unlocked
+ *
+ */
+static void brcms_free(struct brcms_info *wl)
+{
+ struct brcms_timer *t, *next;
+
+ /* free ucode data */
+ if (wl->fw.fw_cnt)
+ brcms_ucode_data_free(&wl->ucode);
+ if (wl->irq)
+ free_irq(wl->irq, wl);
+
+ /* kill dpc */
+ tasklet_kill(&wl->tasklet);
+
+ if (wl->pub) {
+ brcms_debugfs_detach(wl->pub);
+ brcms_c_module_unregister(wl->pub, "linux", wl);
+ }
+
+ /* free common resources */
+ if (wl->wlc) {
+ brcms_c_detach(wl->wlc);
+ wl->wlc = NULL;
+ wl->pub = NULL;
+ }
+
+ /* virtual interface deletion is deferred so we cannot spinwait */
+
+ /* wait for all pending callbacks to complete */
+ while (atomic_read(&wl->callbacks) > 0)
+ schedule();
+
+ /* free timers */
+ for (t = wl->timers; t; t = next) {
+ next = t->next;
+#ifdef DEBUG
+ kfree(t->name);
+#endif
+ kfree(t);
+ }
+}
+
+/*
+* called both from the kernel and from this kernel module (error flow on attach)
+* precondition: perimeter lock is not acquired.
+*/
+static void brcms_remove(struct bcma_device *pdev)
+{
+ struct ieee80211_hw *hw = bcma_get_drvdata(pdev);
+ struct brcms_info *wl = hw->priv;
+
+ if (wl->wlc) {
+ wiphy_rfkill_set_hw_state(wl->pub->ieee_hw->wiphy, false);
+ wiphy_rfkill_stop_polling(wl->pub->ieee_hw->wiphy);
+ ieee80211_unregister_hw(hw);
+ }
+
+ brcms_free(wl);
+
+ bcma_set_drvdata(pdev, NULL);
+ ieee80211_free_hw(hw);
+}
+
static irqreturn_t brcms_isr(int irq, void *dev_id)
{
struct brcms_info *wl;
@@ -1057,8 +1047,18 @@ static struct brcms_info *brcms_attach(struct bcma_device *pdev)
spin_lock_init(&wl->lock);
spin_lock_init(&wl->isr_lock);
+ /* prepare ucode */
+ if (brcms_request_fw(wl, pdev) < 0) {
+ wiphy_err(wl->wiphy, "%s: Failed to find firmware usually in "
+ "%s\n", KBUILD_MODNAME, "/lib/firmware/brcm");
+ brcms_release_fw(wl);
+ brcms_remove(pdev);
+ return NULL;
+ }
+
/* common load-time initialization */
wl->wlc = brcms_c_attach((void *)wl, pdev, unit, false, &err);
+ brcms_release_fw(wl);
if (!wl->wlc) {
wiphy_err(wl->wiphy, "%s: attach() failed with code %d\n",
KBUILD_MODNAME, err);
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
index 18d37645e2cd..21a824232478 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c
@@ -1137,8 +1137,9 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
gain0_15 = ((biq1 & 0xf) << 12) |
((tia & 0xf) << 8) |
((lna2 & 0x3) << 6) |
- ((lna2 &
- 0x3) << 4) | ((lna1 & 0x3) << 2) | ((lna1 & 0x3) << 0);
+ ((lna2 & 0x3) << 4) |
+ ((lna1 & 0x3) << 2) |
+ ((lna1 & 0x3) << 0);
mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);
@@ -1156,6 +1157,8 @@ wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
}
mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
+ mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
+ mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
}
@@ -1328,6 +1331,43 @@ static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
return (iq_est.i_pwr + iq_est.q_pwr) / nsamples;
}
+static bool wlc_lcnphy_rx_iq_cal_gain(struct brcms_phy *pi, u16 biq1_gain,
+ u16 tia_gain, u16 lna2_gain)
+{
+ u32 i_thresh_l, q_thresh_l;
+ u32 i_thresh_h, q_thresh_h;
+ struct lcnphy_iq_est iq_est_h, iq_est_l;
+
+ wlc_lcnphy_set_rx_gain_by_distribution(pi, 0, 0, 0, biq1_gain, tia_gain,
+ lna2_gain, 0);
+
+ wlc_lcnphy_rx_gain_override_enable(pi, true);
+ wlc_lcnphy_start_tx_tone(pi, 2000, (40 >> 1), 0);
+ udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG112, 0);
+ if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_l))
+ return false;
+
+ wlc_lcnphy_start_tx_tone(pi, 2000, 40, 0);
+ udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG112, 0);
+ if (!wlc_lcnphy_rx_iq_est(pi, 1024, 32, &iq_est_h))
+ return false;
+
+ i_thresh_l = (iq_est_l.i_pwr << 1);
+ i_thresh_h = (iq_est_l.i_pwr << 2) + iq_est_l.i_pwr;
+
+ q_thresh_l = (iq_est_l.q_pwr << 1);
+ q_thresh_h = (iq_est_l.q_pwr << 2) + iq_est_l.q_pwr;
+ if ((iq_est_h.i_pwr > i_thresh_l) &&
+ (iq_est_h.i_pwr < i_thresh_h) &&
+ (iq_est_h.q_pwr > q_thresh_l) &&
+ (iq_est_h.q_pwr < q_thresh_h))
+ return true;
+
+ return false;
+}
+
static bool
wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
const struct lcnphy_rx_iqcomp *iqcomp,
@@ -1342,8 +1382,8 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
rfoverride3_old, rfoverride3val_old, rfoverride4_old,
rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
- int tia_gain;
- u32 received_power, rx_pwr_threshold;
+ int tia_gain, lna2_gain, biq1_gain;
+ bool set_gain;
u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
u16 values_to_save[11];
s16 *ptr;
@@ -1368,126 +1408,134 @@ wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
goto cal_done;
}
- if (module == 1) {
+ WARN_ON(module != 1);
+ tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
- tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
+ for (i = 0; i < 11; i++)
+ values_to_save[i] =
+ read_radio_reg(pi, rxiq_cal_rf_reg[i]);
+ Core1TxControl_old = read_phy_reg(pi, 0x631);
+
+ or_phy_reg(pi, 0x631, 0x0015);
+
+ RFOverride0_old = read_phy_reg(pi, 0x44c);
+ RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
+ rfoverride2_old = read_phy_reg(pi, 0x4b0);
+ rfoverride2val_old = read_phy_reg(pi, 0x4b1);
+ rfoverride3_old = read_phy_reg(pi, 0x4f9);
+ rfoverride3val_old = read_phy_reg(pi, 0x4fa);
+ rfoverride4_old = read_phy_reg(pi, 0x938);
+ rfoverride4val_old = read_phy_reg(pi, 0x939);
+ afectrlovr_old = read_phy_reg(pi, 0x43b);
+ afectrlovrval_old = read_phy_reg(pi, 0x43c);
+ old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
+ old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
- for (i = 0; i < 11; i++)
- values_to_save[i] =
- read_radio_reg(pi, rxiq_cal_rf_reg[i]);
- Core1TxControl_old = read_phy_reg(pi, 0x631);
-
- or_phy_reg(pi, 0x631, 0x0015);
-
- RFOverride0_old = read_phy_reg(pi, 0x44c);
- RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
- rfoverride2_old = read_phy_reg(pi, 0x4b0);
- rfoverride2val_old = read_phy_reg(pi, 0x4b1);
- rfoverride3_old = read_phy_reg(pi, 0x4f9);
- rfoverride3val_old = read_phy_reg(pi, 0x4fa);
- rfoverride4_old = read_phy_reg(pi, 0x938);
- rfoverride4val_old = read_phy_reg(pi, 0x939);
- afectrlovr_old = read_phy_reg(pi, 0x43b);
- afectrlovrval_old = read_phy_reg(pi, 0x43c);
- old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
- old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
-
- tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
- if (tx_gain_override_old) {
- wlc_lcnphy_get_tx_gain(pi, &old_gains);
- tx_gain_index_old = pi_lcn->lcnphy_current_index;
- }
+ tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
+ if (tx_gain_override_old) {
+ wlc_lcnphy_get_tx_gain(pi, &old_gains);
+ tx_gain_index_old = pi_lcn->lcnphy_current_index;
+ }
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);
- mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
+ mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);
- write_radio_reg(pi, RADIO_2064_REG116, 0x06);
- write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
- write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
- write_radio_reg(pi, RADIO_2064_REG098, 0x03);
- write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
- mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
- write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
- write_radio_reg(pi, RADIO_2064_REG114, 0x01);
- write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
- write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
-
- mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
- mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
- mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
- mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
- mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
- mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
-
- mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
- mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
-
- wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
- write_phy_reg(pi, 0x6da, 0xffff);
- or_phy_reg(pi, 0x6db, 0x3);
- wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
- wlc_lcnphy_rx_gain_override_enable(pi, true);
-
- tia_gain = 8;
- rx_pwr_threshold = 950;
- while (tia_gain > 0) {
- tia_gain -= 1;
- wlc_lcnphy_set_rx_gain_by_distribution(pi,
- 0, 0, 2, 2,
- (u16)
- tia_gain, 1, 0);
- udelay(500);
+ write_radio_reg(pi, RADIO_2064_REG116, 0x06);
+ write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
+ write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
+ write_radio_reg(pi, RADIO_2064_REG098, 0x03);
+ write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
+ mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
+ write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG114, 0x01);
+ write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
+ write_radio_reg(pi, RADIO_2064_REG12A, 0x08);
+
+ mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
+ mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
+ mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
+ mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
+ mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
+ mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);
- received_power =
- wlc_lcnphy_measure_digital_power(pi, 2000);
- if (received_power < rx_pwr_threshold)
- break;
+ mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
+ mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);
+
+ write_phy_reg(pi, 0x6da, 0xffff);
+ or_phy_reg(pi, 0x6db, 0x3);
+
+ wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
+ set_gain = false;
+
+ lna2_gain = 3;
+ while ((lna2_gain >= 0) && !set_gain) {
+ tia_gain = 4;
+
+ while ((tia_gain >= 0) && !set_gain) {
+ biq1_gain = 6;
+
+ while ((biq1_gain >= 0) && !set_gain) {
+ set_gain = wlc_lcnphy_rx_iq_cal_gain(pi,
+ (u16)
+ biq1_gain,
+ (u16)
+ tia_gain,
+ (u16)
+ lna2_gain);
+ biq1_gain -= 1;
+ }
+ tia_gain -= 1;
}
- result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);
+ lna2_gain -= 1;
+ }
- wlc_lcnphy_stop_tx_tone(pi);
+ if (set_gain)
+ result = wlc_lcnphy_calc_rx_iq_comp(pi, 1024);
+ else
+ result = false;
- write_phy_reg(pi, 0x631, Core1TxControl_old);
+ wlc_lcnphy_stop_tx_tone(pi);
- write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
- write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
- write_phy_reg(pi, 0x4b0, rfoverride2_old);
- write_phy_reg(pi, 0x4b1, rfoverride2val_old);
- write_phy_reg(pi, 0x4f9, rfoverride3_old);
- write_phy_reg(pi, 0x4fa, rfoverride3val_old);
- write_phy_reg(pi, 0x938, rfoverride4_old);
- write_phy_reg(pi, 0x939, rfoverride4val_old);
- write_phy_reg(pi, 0x43b, afectrlovr_old);
- write_phy_reg(pi, 0x43c, afectrlovrval_old);
- write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
- write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
+ write_phy_reg(pi, 0x631, Core1TxControl_old);
+
+ write_phy_reg(pi, 0x44c, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x44d, RFOverrideVal0_old);
+ write_phy_reg(pi, 0x4b0, rfoverride2_old);
+ write_phy_reg(pi, 0x4b1, rfoverride2val_old);
+ write_phy_reg(pi, 0x4f9, rfoverride3_old);
+ write_phy_reg(pi, 0x4fa, rfoverride3val_old);
+ write_phy_reg(pi, 0x938, rfoverride4_old);
+ write_phy_reg(pi, 0x939, rfoverride4val_old);
+ write_phy_reg(pi, 0x43b, afectrlovr_old);
+ write_phy_reg(pi, 0x43c, afectrlovrval_old);
+ write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
+ write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);
- wlc_lcnphy_clear_trsw_override(pi);
+ wlc_lcnphy_clear_trsw_override(pi);
- mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
+ mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);
- for (i = 0; i < 11; i++)
- write_radio_reg(pi, rxiq_cal_rf_reg[i],
- values_to_save[i]);
+ for (i = 0; i < 11; i++)
+ write_radio_reg(pi, rxiq_cal_rf_reg[i],
+ values_to_save[i]);
- if (tx_gain_override_old)
- wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
- else
- wlc_lcnphy_disable_tx_gain_override(pi);
+ if (tx_gain_override_old)
+ wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
+ else
+ wlc_lcnphy_disable_tx_gain_override(pi);
- wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
- wlc_lcnphy_rx_gain_override_enable(pi, false);
- }
+ wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
+ wlc_lcnphy_rx_gain_override_enable(pi, false);
cal_done:
kfree(ptr);
@@ -1781,6 +1829,17 @@ wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
write_radio_reg(pi, RADIO_2064_REG038, 3);
write_radio_reg(pi, RADIO_2064_REG091, 7);
}
+
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ u8 reg038[14] = {0xd, 0xe, 0xd, 0xd, 0xd, 0xc,
+ 0xa, 0xb, 0xb, 0x3, 0x3, 0x2, 0x0, 0x0};
+
+ write_radio_reg(pi, RADIO_2064_REG02A, 0xf);
+ write_radio_reg(pi, RADIO_2064_REG091, 0x3);
+ write_radio_reg(pi, RADIO_2064_REG038, 0x3);
+
+ write_radio_reg(pi, RADIO_2064_REG038, reg038[channel - 1]);
+ }
}
static int
@@ -1975,6 +2034,16 @@ wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
} else {
mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x4, 1<<2);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x10, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x10, 1<<4);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x77);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0xe<<1);
+ mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1<<7);
+ mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 1<<1);
+ mod_radio_reg(pi, RADIO_2064_REG029, 0xf0, 0<<4);
}
} else {
mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
@@ -2061,12 +2130,14 @@ static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
(auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
+ mod_radio_reg(pi, RADIO_2064_REG07C, (1 << 0), (1 << 0));
}
static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
struct phytbl_info tab;
u32 rfseq, ind;
+ u8 tssi_sel;
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
@@ -2088,7 +2159,13 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);
- wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
+ if (pi->sh->boardflags & BFL_FEM) {
+ tssi_sel = 0x1;
+ wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
+ } else {
+ tssi_sel = 0xe;
+ wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_POST_PA);
+ }
mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
@@ -2124,9 +2201,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);
if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
- mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
+ mod_radio_reg(pi, RADIO_2064_REG028, 0xf, tssi_sel);
mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
} else {
+ mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, tssi_sel << 1);
mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
}
@@ -2173,6 +2251,10 @@ static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
+ mod_radio_reg(pi, RADIO_2064_REG035, 0xff, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG036, 0x3, 0x0);
+ mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
+
wlc_lcnphy_pwrctrl_rssiparams(pi);
}
@@ -2791,6 +2873,8 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
read_radio_reg(pi, RADIO_2064_REG007) & 1;
u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;
+ u8 SAVE_bbmult = wlc_lcnphy_get_bbmult(pi);
+
idleTssi = read_phy_reg(pi, 0x4ab);
suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
MCTL_EN_MAC));
@@ -2808,6 +2892,12 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
wlc_lcnphy_tssi_setup(pi);
+
+ mod_phy_reg(pi, 0x4d7, (0x1 << 0), (1 << 0));
+ mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1 << 6));
+
+ wlc_lcnphy_set_bbmult(pi, 0x0);
+
wlc_phy_do_dummy_tx(pi, true, OFF);
idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
>> 0);
@@ -2829,6 +2919,7 @@ static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);
+ wlc_lcnphy_set_bbmult(pi, SAVE_bbmult);
wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
wlc_lcnphy_set_tx_gain(pi, &old_gains);
wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
@@ -3042,6 +3133,11 @@ static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
wlc_lcnphy_write_table(pi, &tab);
tab.tbl_offset++;
}
+ mod_phy_reg(pi, 0x4d0, (0x1 << 0), (0) << 0);
+ mod_phy_reg(pi, 0x4d3, (0xff << 0), (0) << 0);
+ mod_phy_reg(pi, 0x4d3, (0xff << 8), (0) << 8);
+ mod_phy_reg(pi, 0x4d0, (0x1 << 4), (0) << 4);
+ mod_phy_reg(pi, 0x4d0, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);
@@ -3843,7 +3939,6 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
target_gains.pad_gain = 21;
target_gains.dac_gain = 0;
wlc_lcnphy_set_tx_gain(pi, &target_gains);
- wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {
@@ -3854,6 +3949,7 @@ static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
lcnphy_recal ? LCNPHY_CAL_RECAL :
LCNPHY_CAL_FULL), false);
} else {
+ wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
}
@@ -4278,17 +4374,22 @@ wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
if (CHSPEC_IS5G(pi->radio_chanspec))
pa_gain = 0x70;
else
- pa_gain = 0x70;
+ pa_gain = 0x60;
if (pi->sh->boardflags & BFL_FEM)
pa_gain = 0x10;
+
tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
tab.tbl_width = 32;
tab.tbl_len = 1;
tab.tbl_ptr = &val;
for (j = 0; j < 128; j++) {
- gm_gain = gain_table[j].gm;
+ if (pi->sh->boardflags & BFL_FEM)
+ gm_gain = gain_table[j].gm;
+ else
+ gm_gain = 15;
+
val = (((u32) pa_gain << 24) |
(gain_table[j].pad << 16) |
(gain_table[j].pga << 8) | gm_gain);
@@ -4499,7 +4600,10 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
write_phy_reg(pi, 0x4ea, 0x4688);
- mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+ if (pi->sh->boardflags & BFL_FEM)
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
+ else
+ mod_phy_reg(pi, 0x4eb, (0x7 << 0), 3 << 0);
mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
@@ -4510,6 +4614,13 @@ static void wlc_radio_2064_init(struct brcms_phy *pi)
wlc_lcnphy_rcal(pi);
wlc_lcnphy_rc_cal(pi);
+
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ write_radio_reg(pi, RADIO_2064_REG032, 0x6f);
+ write_radio_reg(pi, RADIO_2064_REG033, 0x19);
+ write_radio_reg(pi, RADIO_2064_REG039, 0xe);
+ }
+
}
static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
@@ -4539,22 +4650,20 @@ static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
wlc_lcnphy_write_table(pi, &tab);
}
- tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
- tab.tbl_width = 16;
- tab.tbl_ptr = &val;
- tab.tbl_len = 1;
-
- val = 114;
- tab.tbl_offset = 0;
- wlc_lcnphy_write_table(pi, &tab);
+ if (!(pi->sh->boardflags & BFL_FEM)) {
+ tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
+ tab.tbl_width = 16;
+ tab.tbl_ptr = &val;
+ tab.tbl_len = 1;
- val = 130;
- tab.tbl_offset = 1;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 150;
+ tab.tbl_offset = 0;
+ wlc_lcnphy_write_table(pi, &tab);
- val = 6;
- tab.tbl_offset = 8;
- wlc_lcnphy_write_table(pi, &tab);
+ val = 220;
+ tab.tbl_offset = 1;
+ wlc_lcnphy_write_table(pi, &tab);
+ }
if (CHSPEC_IS2G(pi->radio_chanspec)) {
if (pi->sh->boardflags & BFL_FEM)
@@ -4946,6 +5055,7 @@ void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
wlc_lcnphy_load_tx_iir_filter(pi, true, 3);
mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
+ wlc_lcnphy_tssi_setup(pi);
}
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
@@ -4984,8 +5094,7 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
return false;
- if ((pi->sh->boardflags & BFL_FEM) &&
- (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
+ if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
if (pi_lcn->lcnphy_tempsense_option == 3) {
pi->hwpwrctrl = true;
pi->hwpwrctrl_capable = true;
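
The new wlc_lcnphy_rx_iq_cal_gain() helper added above accepts a gain combination when the power estimated at the higher test tone lands strictly between 2x and 5x the power at the lower tone, on both the I and Q rails (the 5x comes from (x << 2) + x). A minimal model of that window test (gain_ok() is an invented name; the threshold math matches the helper):

    #include <stdbool.h>
    #include <stdio.h>

    static bool gain_ok(unsigned int i_lo, unsigned int q_lo,
                        unsigned int i_hi, unsigned int q_hi)
    {
        unsigned int i_thresh_l = i_lo << 1;           /* 2 * i_lo */
        unsigned int i_thresh_h = (i_lo << 2) + i_lo;  /* 5 * i_lo */
        unsigned int q_thresh_l = q_lo << 1;
        unsigned int q_thresh_h = (q_lo << 2) + q_lo;

        return i_hi > i_thresh_l && i_hi < i_thresh_h &&
               q_hi > q_thresh_l && q_hi < q_thresh_h;
    }

    int main(void)
    {
        printf("%d\n", gain_ok(100, 100, 300, 300)); /* inside window -> 1 */
        printf("%d\n", gain_ok(100, 100, 600, 300)); /* I rail too high -> 0 */
        return 0;
    }
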
diff --git a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
index 622c01ca72c5..b7e95acc2084 100644
--- a/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
+++ b/trunk/drivers/net/wireless/brcm80211/brcmsmac/phy/phytbl_lcn.c
@@ -1992,70 +1992,70 @@ static const u16 dot11lcn_sw_ctrl_tbl_4313_epa_rev0[] = {
};
static const u16 dot11lcn_sw_ctrl_tbl_4313_rev0[] = {
- 0x000a,
0x0009,
- 0x0006,
- 0x0005,
0x000a,
- 0x0009,
- 0x0006,
0x0005,
- 0x000a,
- 0x0009,
0x0006,
- 0x0005,
- 0x000a,
0x0009,
- 0x0006,
- 0x0005,
0x000a,
- 0x0009,
- 0x0006,
0x0005,
- 0x000a,
- 0x0009,
0x0006,
- 0x0005,
- 0x000a,
0x0009,
- 0x0006,
- 0x0005,
0x000a,
- 0x0009,
- 0x0006,
0x0005,
- 0x000a,
- 0x0009,
0x0006,
- 0x0005,
- 0x000a,
0x0009,
- 0x0006,
- 0x0005,
0x000a,
- 0x0009,
- 0x0006,
0x0005,
- 0x000a,
- 0x0009,
0x0006,
- 0x0005,
+ 0x0009,
0x000a,
+ 0x0005,
+ 0x0006,
0x0009,
+ 0x000a,
+ 0x0005,
0x0006,
+ 0x0009,
+ 0x000a,
0x0005,
+ 0x0006,
+ 0x0009,
0x000a,
+ 0x0005,
+ 0x0006,
0x0009,
+ 0x000a,
+ 0x0005,
0x0006,
+ 0x0009,
+ 0x000a,
0x0005,
+ 0x0006,
+ 0x0009,
0x000a,
+ 0x0005,
+ 0x0006,
0x0009,
+ 0x000a,
+ 0x0005,
0x0006,
+ 0x0009,
+ 0x000a,
0x0005,
+ 0x0006,
+ 0x0009,
0x000a,
+ 0x0005,
+ 0x0006,
0x0009,
+ 0x000a,
+ 0x0005,
0x0006,
+ 0x0009,
+ 0x000a,
0x0005,
+ 0x0006,
};
static const u16 dot11lcn_sw_ctrl_tbl_rev0[] = {
diff --git a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
index 6c7493c2d698..e8324b5e5bfe 100644
--- a/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
+++ b/trunk/drivers/net/wireless/iwlegacy/4965-rs.c
@@ -2152,7 +2152,7 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
int rate_idx;
int i;
u32 rate;
- u8 use_green;
+ u8 use_green = il4965_rs_use_green(il, sta);
u8 active_tbl = 0;
u8 valid_tx_ant;
struct il_station_priv *sta_priv;
@@ -2160,7 +2160,6 @@ il4965_rs_initialize_lq(struct il_priv *il, struct ieee80211_conf *conf,
if (!sta || !lq_sta)
return;
- use_green = il4965_rs_use_green(il, sta);
sta_priv = (void *)sta->drv_priv;
i = lq_sta->last_txrate_idx;
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c b/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c
index 44ca0e57f9f7..86ea5f4c3939 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/lib.c
@@ -1261,15 +1261,6 @@ int iwl_dvm_send_cmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
return -EIO;
}
- /*
- * This can happen upon FW ASSERT: we clear the STATUS_FW_ERROR flag
- * in iwl_down but cancel the workers only later.
- */
- if (!priv->ucode_loaded) {
- IWL_ERR(priv, "Fw not loaded - dropping CMD: %x\n", cmd->id);
- return -EIO;
- }
-
/*
* Synchronous commands from this op-mode must hold
* the mutex, this ensures we don't try to send two
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c b/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
index a82b6b39d4ff..23be948cf162 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1419,14 +1419,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
mutex_lock(&priv->mutex);
- if (changes & BSS_CHANGED_IDLE && bss_conf->idle) {
- /*
- * If we go idle, then clearly no "passive-no-rx"
- * workaround is needed any more, this is a reset.
- */
- iwlagn_lift_passive_no_rx(priv);
- }
-
if (unlikely(!iwl_is_ready(priv))) {
IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
mutex_unlock(&priv->mutex);
@@ -1458,6 +1450,16 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
priv->timestamp = bss_conf->sync_tsf;
ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
} else {
+ /*
+ * If we disassociate while there are pending
+ * frames, just wake up the queues and let the
+ * frames "escape" ... This shouldn't really
+ * be happening to start with, but we should
+ * not get stuck in this case either since it
+ * can happen if userspace gets confused.
+ */
+ iwlagn_lift_passive_no_rx(priv);
+
ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
if (ctx->ctxid == IWL_RXON_CTX_BSS)
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c b/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c
index d1a670d7b10c..6aec2df3bb27 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1192,7 +1192,7 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
memset(&info->status, 0, sizeof(info->status));
if (status == TX_STATUS_FAIL_PASSIVE_NO_RX &&
- ctx->vif &&
+ iwl_is_associated_ctx(ctx) && ctx->vif &&
ctx->vif->type == NL80211_IFTYPE_STATION) {
/* block and stop all queues */
priv->passive_no_rx = true;
diff --git a/trunk/drivers/net/wireless/iwlwifi/dvm/ucode.c b/trunk/drivers/net/wireless/iwlwifi/dvm/ucode.c
index 1a4ac9236a44..736fe9bb140e 100644
--- a/trunk/drivers/net/wireless/iwlwifi/dvm/ucode.c
+++ b/trunk/drivers/net/wireless/iwlwifi/dvm/ucode.c
@@ -367,8 +367,6 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
return -EIO;
}
- priv->ucode_loaded = true;
-
if (ucode_type != IWL_UCODE_WOWLAN) {
/* delay a bit to give rfkill time to run */
msleep(5);
@@ -382,6 +380,8 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
return ret;
}
+ priv->ucode_loaded = true;
+
return 0;
}
diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c
index 12c4f31ca8fb..17bedc50e753 100644
--- a/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/trunk/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -475,10 +475,6 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
/* If platform's RF_KILL switch is NOT set to KILL */
hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
@@ -645,7 +641,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
bool hw_rfkill;
int err;
@@ -661,10 +656,6 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
return 0;
@@ -703,10 +694,6 @@ static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
* op_mode.
*/
hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans_pcie->status);
- else
- clear_bit(STATUS_RFKILL, &trans_pcie->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
}
}
diff --git a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c
index cb5c6792e3a8..8595c16f74de 100644
--- a/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/trunk/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1264,7 +1264,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
int copy = 0;
- if (!cmd->len[i])
+ if (!cmd->len)
continue;
/* need at least IWL_HCMD_SCRATCHBUF_SIZE copied */
diff --git a/trunk/drivers/net/wireless/mwifiex/cfg80211.c b/trunk/drivers/net/wireless/mwifiex/cfg80211.c
index 8aaf56ade4d9..a44023a7bd57 100644
--- a/trunk/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/trunk/drivers/net/wireless/mwifiex/cfg80211.c
@@ -1892,8 +1892,7 @@ mwifiex_cfg80211_scan(struct wiphy *wiphy,
}
}
- for (i = 0; i < min_t(u32, request->n_channels,
- MWIFIEX_USER_SCAN_CHAN_MAX); i++) {
+ for (i = 0; i < request->n_channels; i++) {
chan = request->channels[i];
priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value;
priv->user_scan_cfg->chan_list[i].radio_type = chan->band;
diff --git a/trunk/drivers/net/wireless/mwifiex/pcie.c b/trunk/drivers/net/wireless/mwifiex/pcie.c
index feb204613397..5c395e2e6a2b 100644
--- a/trunk/drivers/net/wireless/mwifiex/pcie.c
+++ b/trunk/drivers/net/wireless/mwifiex/pcie.c
@@ -1508,7 +1508,6 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
}
memcpy(adapter->upld_buf, skb->data,
min_t(u32, MWIFIEX_SIZE_OF_CMD_BUFFER, skb->len));
- skb_push(skb, INTF_HEADER_LEN);
if (mwifiex_map_pci_memory(adapter, skb, MWIFIEX_UPLD_SIZE,
PCI_DMA_FROMDEVICE))
return -1;
diff --git a/trunk/drivers/net/wireless/mwifiex/scan.c b/trunk/drivers/net/wireless/mwifiex/scan.c
index e7f6deaf715e..d215b4d3c51b 100644
--- a/trunk/drivers/net/wireless/mwifiex/scan.c
+++ b/trunk/drivers/net/wireless/mwifiex/scan.c
@@ -1393,10 +1393,8 @@ int mwifiex_scan_networks(struct mwifiex_private *priv,
queue_work(adapter->workqueue, &adapter->main_work);
/* Perform internal scan synchronously */
- if (!priv->scan_request) {
- dev_dbg(adapter->dev, "wait internal scan\n");
+ if (!priv->scan_request)
mwifiex_wait_queue_complete(adapter, cmd_node);
- }
} else {
spin_unlock_irqrestore(&adapter->scan_pending_q_lock,
flags);
@@ -1795,12 +1793,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
/* Need to indicate IOCTL complete */
if (adapter->curr_cmd->wait_q_enabled) {
adapter->cmd_wait_q.status = 0;
- if (!priv->scan_request) {
- dev_dbg(adapter->dev,
- "complete internal scan\n");
- mwifiex_complete_cmd(adapter,
- adapter->curr_cmd);
- }
+ mwifiex_complete_cmd(adapter, adapter->curr_cmd);
}
if (priv->report_scan_result)
priv->report_scan_result = false;
diff --git a/trunk/drivers/net/wireless/rt2x00/Kconfig b/trunk/drivers/net/wireless/rt2x00/Kconfig
index 76cd47eb901e..2bf4efa33186 100644
--- a/trunk/drivers/net/wireless/rt2x00/Kconfig
+++ b/trunk/drivers/net/wireless/rt2x00/Kconfig
@@ -20,7 +20,6 @@ if RT2X00
config RT2400PCI
tristate "Ralink rt2400 (PCI/PCMCIA) support"
depends on PCI
- select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---
@@ -32,7 +31,6 @@ config RT2400PCI
config RT2500PCI
tristate "Ralink rt2500 (PCI/PCMCIA) support"
depends on PCI
- select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI
select EEPROM_93CX6
---help---
@@ -45,7 +43,6 @@ config RT61PCI
tristate "Ralink rt2501/rt61 (PCI/PCMCIA) support"
depends on PCI
select RT2X00_LIB_PCI
- select RT2X00_LIB_MMIO
select RT2X00_LIB_FIRMWARE
select RT2X00_LIB_CRYPTO
select CRC_ITU_T
@@ -60,7 +57,6 @@ config RT2800PCI
tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
depends on PCI || SOC_RT288X || SOC_RT305X
select RT2800_LIB
- select RT2X00_LIB_MMIO
select RT2X00_LIB_PCI if PCI
select RT2X00_LIB_SOC if SOC_RT288X || SOC_RT305X
select RT2X00_LIB_FIRMWARE
@@ -189,9 +185,6 @@ endif
config RT2800_LIB
tristate
-config RT2X00_LIB_MMIO
- tristate
-
config RT2X00_LIB_PCI
tristate
select RT2X00_LIB
diff --git a/trunk/drivers/net/wireless/rt2x00/Makefile b/trunk/drivers/net/wireless/rt2x00/Makefile
index f069d8bc5b67..349d5b8284a4 100644
--- a/trunk/drivers/net/wireless/rt2x00/Makefile
+++ b/trunk/drivers/net/wireless/rt2x00/Makefile
@@ -9,7 +9,6 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_FIRMWARE) += rt2x00firmware.o
rt2x00lib-$(CONFIG_RT2X00_LIB_LEDS) += rt2x00leds.o
obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
-obj-$(CONFIG_RT2X00_LIB_MMIO) += rt2x00mmio.o
obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2400pci.c b/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
index dcfb54e0c516..221beaaa83f1 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -34,7 +34,6 @@
#include
#include "rt2x00.h"
-#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2400pci.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2500pci.c b/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
index e1d2dc9ed28a..39edc59e8d03 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -34,7 +34,6 @@
#include
#include "rt2x00.h"
-#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2500pci.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
index ba5a05625aaa..ded73da4de0b 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -41,7 +41,6 @@
#include
#include "rt2x00.h"
-#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt2x00soc.h"
#include "rt2800lib.h"
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c b/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c
deleted file mode 100644
index d84a680ba0c9..000000000000
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.c
+++ /dev/null
@@ -1,216 +0,0 @@
-/*
- Copyright (C) 2004 - 2009 Ivo van Doorn
-
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- Module: rt2x00mmio
- Abstract: rt2x00 generic mmio device routines.
- */
-
-#include
-#include
-#include
-#include
-
-#include "rt2x00.h"
-#include "rt2x00mmio.h"
-
-/*
- * Register access.
- */
-int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const struct rt2x00_field32 field,
- u32 *reg)
-{
- unsigned int i;
-
- if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
- return 0;
-
- for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
- rt2x00pci_register_read(rt2x00dev, offset, reg);
- if (!rt2x00_get_field32(*reg, field))
- return 1;
- udelay(REGISTER_BUSY_DELAY);
- }
-
- printk_once(KERN_ERR "%s() Indirect register access failed: "
- "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg);
- *reg = ~0;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
-
-bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue = rt2x00dev->rx;
- struct queue_entry *entry;
- struct queue_entry_priv_pci *entry_priv;
- struct skb_frame_desc *skbdesc;
- int max_rx = 16;
-
- while (--max_rx) {
- entry = rt2x00queue_get_entry(queue, Q_INDEX);
- entry_priv = entry->priv_data;
-
- if (rt2x00dev->ops->lib->get_entry_state(entry))
- break;
-
- /*
- * Fill in desc fields of the skb descriptor
- */
- skbdesc = get_skb_frame_desc(entry->skb);
- skbdesc->desc = entry_priv->desc;
- skbdesc->desc_len = entry->queue->desc_size;
-
- /*
- * DMA is already done, notify rt2x00lib that
- * it finished successfully.
- */
- rt2x00lib_dmastart(entry);
- rt2x00lib_dmadone(entry);
-
- /*
- * Send the frame to rt2x00lib for further processing.
- */
- rt2x00lib_rxdone(entry, GFP_ATOMIC);
- }
-
- return !max_rx;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
-
-void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
-{
- unsigned int i;
-
- for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
- msleep(10);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
-
-/*
- * Device initialization handlers.
- */
-static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
- struct data_queue *queue)
-{
- struct queue_entry_priv_pci *entry_priv;
- void *addr;
- dma_addr_t dma;
- unsigned int i;
-
- /*
- * Allocate DMA memory for descriptor and buffer.
- */
- addr = dma_alloc_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size,
- &dma, GFP_KERNEL);
- if (!addr)
- return -ENOMEM;
-
- memset(addr, 0, queue->limit * queue->desc_size);
-
- /*
- * Initialize all queue entries to contain valid addresses.
- */
- for (i = 0; i < queue->limit; i++) {
- entry_priv = queue->entries[i].priv_data;
- entry_priv->desc = addr + i * queue->desc_size;
- entry_priv->desc_dma = dma + i * queue->desc_size;
- }
-
- return 0;
-}
-
-static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
- struct data_queue *queue)
-{
- struct queue_entry_priv_pci *entry_priv =
- queue->entries[0].priv_data;
-
- if (entry_priv->desc)
- dma_free_coherent(rt2x00dev->dev,
- queue->limit * queue->desc_size,
- entry_priv->desc, entry_priv->desc_dma);
- entry_priv->desc = NULL;
-}
-
-int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
- int status;
-
- /*
- * Allocate DMA
- */
- queue_for_each(rt2x00dev, queue) {
- status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
- if (status)
- goto exit;
- }
-
- /*
- * Register interrupt handler.
- */
- status = request_irq(rt2x00dev->irq,
- rt2x00dev->ops->lib->irq_handler,
- IRQF_SHARED, rt2x00dev->name, rt2x00dev);
- if (status) {
- ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
- rt2x00dev->irq, status);
- goto exit;
- }
-
- return 0;
-
-exit:
- queue_for_each(rt2x00dev, queue)
- rt2x00pci_free_queue_dma(rt2x00dev, queue);
-
- return status;
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
-
-void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
-{
- struct data_queue *queue;
-
- /*
- * Free irq line.
- */
- free_irq(rt2x00dev->irq, rt2x00dev);
-
- /*
- * Free DMA
- */
- queue_for_each(rt2x00dev, queue)
- rt2x00pci_free_queue_dma(rt2x00dev, queue);
-}
-EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
-
-/*
- * rt2x00mmio module information.
- */
-MODULE_AUTHOR(DRV_PROJECT);
-MODULE_VERSION(DRV_VERSION);
-MODULE_DESCRIPTION("rt2x00 mmio library");
-MODULE_LICENSE("GPL");
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h b/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h
deleted file mode 100644
index 4ecaf60175bf..000000000000
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00mmio.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/*
- Copyright (C) 2004 - 2009 Ivo van Doorn
-
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program; if not, write to the
- Free Software Foundation, Inc.,
- 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- */
-
-/*
- Module: rt2x00mmio
- Abstract: Data structures for the rt2x00mmio module.
- */
-
-#ifndef RT2X00MMIO_H
-#define RT2X00MMIO_H
-
-#include
-
-/*
- * Register access.
- */
-static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 *value)
-{
- *value = readl(rt2x00dev->csr.base + offset);
-}
-
-static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- void *value, const u32 length)
-{
- memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
-}
-
-static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- u32 value)
-{
- writel(value, rt2x00dev->csr.base + offset);
-}
-
-static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const void *value,
- const u32 length)
-{
- __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
-}
-
-/**
- * rt2x00pci_regbusy_read - Read from register with busy check
- * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
- * @offset: Register offset
- * @field: Field to check if register is busy
- * @reg: Pointer to where register contents should be stored
- *
- * This function will read the given register, and checks if the
- * register is busy. If it is, it will sleep for a couple of
- * microseconds before reading the register again. If the register
- * is not read after a certain timeout, this function will return
- * FALSE.
- */
-int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
- const unsigned int offset,
- const struct rt2x00_field32 field,
- u32 *reg);
-
-/**
- * struct queue_entry_priv_pci: Per entry PCI specific information
- *
- * @desc: Pointer to device descriptor
- * @desc_dma: DMA pointer to &desc.
- * @data: Pointer to device's entry memory.
- * @data_dma: DMA pointer to &data.
- */
-struct queue_entry_priv_pci {
- __le32 *desc;
- dma_addr_t desc_dma;
-};
-
-/**
- * rt2x00pci_rxdone - Handle RX done events
- * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
- *
- * Returns true if there are still rx frames pending and false if all
- * pending rx frames were processed.
- */
-bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
-
-/**
- * rt2x00pci_flush_queue - Flush data queue
- * @queue: Data queue to stop
- * @drop: True to drop all pending frames.
- *
- * This will wait for a maximum of 100ms, waiting for the queues
- * to become empty.
- */
-void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
-
-/*
- * Device initialization handlers.
- */
-int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
-void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
-
-#endif /* RT2X00MMIO_H */
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
index e87865e33113..a0c8caef3b0a 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -32,6 +32,182 @@
#include "rt2x00.h"
#include "rt2x00pci.h"
+/*
+ * Register access.
+ */
+int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const struct rt2x00_field32 field,
+ u32 *reg)
+{
+ unsigned int i;
+
+ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
+ return 0;
+
+ for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
+ rt2x00pci_register_read(rt2x00dev, offset, reg);
+ if (!rt2x00_get_field32(*reg, field))
+ return 1;
+ udelay(REGISTER_BUSY_DELAY);
+ }
+
+ ERROR(rt2x00dev, "Indirect register access failed: "
+ "offset=0x%.08x, value=0x%.08x\n", offset, *reg);
+ *reg = ~0;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_regbusy_read);
+
+bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue = rt2x00dev->rx;
+ struct queue_entry *entry;
+ struct queue_entry_priv_pci *entry_priv;
+ struct skb_frame_desc *skbdesc;
+ int max_rx = 16;
+
+ while (--max_rx) {
+ entry = rt2x00queue_get_entry(queue, Q_INDEX);
+ entry_priv = entry->priv_data;
+
+ if (rt2x00dev->ops->lib->get_entry_state(entry))
+ break;
+
+ /*
+ * Fill in desc fields of the skb descriptor
+ */
+ skbdesc = get_skb_frame_desc(entry->skb);
+ skbdesc->desc = entry_priv->desc;
+ skbdesc->desc_len = entry->queue->desc_size;
+
+ /*
+ * DMA is already done, notify rt2x00lib that
+ * it finished successfully.
+ */
+ rt2x00lib_dmastart(entry);
+ rt2x00lib_dmadone(entry);
+
+ /*
+ * Send the frame to rt2x00lib for further processing.
+ */
+ rt2x00lib_rxdone(entry, GFP_ATOMIC);
+ }
+
+ return !max_rx;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_rxdone);
+
+void rt2x00pci_flush_queue(struct data_queue *queue, bool drop)
+{
+ unsigned int i;
+
+ for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++)
+ msleep(10);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_flush_queue);
+
+/*
+ * Device initialization handlers.
+ */
+static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
+{
+ struct queue_entry_priv_pci *entry_priv;
+ void *addr;
+ dma_addr_t dma;
+ unsigned int i;
+
+ /*
+ * Allocate DMA memory for descriptor and buffer.
+ */
+ addr = dma_alloc_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size,
+ &dma, GFP_KERNEL);
+ if (!addr)
+ return -ENOMEM;
+
+ memset(addr, 0, queue->limit * queue->desc_size);
+
+ /*
+ * Initialize all queue entries to contain valid addresses.
+ */
+ for (i = 0; i < queue->limit; i++) {
+ entry_priv = queue->entries[i].priv_data;
+ entry_priv->desc = addr + i * queue->desc_size;
+ entry_priv->desc_dma = dma + i * queue->desc_size;
+ }
+
+ return 0;
+}
+
+static void rt2x00pci_free_queue_dma(struct rt2x00_dev *rt2x00dev,
+ struct data_queue *queue)
+{
+ struct queue_entry_priv_pci *entry_priv =
+ queue->entries[0].priv_data;
+
+ if (entry_priv->desc)
+ dma_free_coherent(rt2x00dev->dev,
+ queue->limit * queue->desc_size,
+ entry_priv->desc, entry_priv->desc_dma);
+ entry_priv->desc = NULL;
+}
+
+int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+ int status;
+
+ /*
+ * Allocate DMA
+ */
+ queue_for_each(rt2x00dev, queue) {
+ status = rt2x00pci_alloc_queue_dma(rt2x00dev, queue);
+ if (status)
+ goto exit;
+ }
+
+ /*
+ * Register interrupt handler.
+ */
+ status = request_irq(rt2x00dev->irq,
+ rt2x00dev->ops->lib->irq_handler,
+ IRQF_SHARED, rt2x00dev->name, rt2x00dev);
+ if (status) {
+ ERROR(rt2x00dev, "IRQ %d allocation failed (error %d).\n",
+ rt2x00dev->irq, status);
+ goto exit;
+ }
+
+ return 0;
+
+exit:
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_initialize);
+
+void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev)
+{
+ struct data_queue *queue;
+
+ /*
+ * Free irq line.
+ */
+ free_irq(rt2x00dev->irq, rt2x00dev);
+
+ /*
+ * Free DMA
+ */
+ queue_for_each(rt2x00dev, queue)
+ rt2x00pci_free_queue_dma(rt2x00dev, queue);
+}
+EXPORT_SYMBOL_GPL(rt2x00pci_uninitialize);
+
/*
* PCI driver handlers.
*/
diff --git a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
index 60d90b20f8b9..e2c99f2b9a14 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/trunk/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -35,6 +35,94 @@
*/
#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
+/*
+ * Register access.
+ */
+static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u32 *value)
+{
+ *value = readl(rt2x00dev->csr.base + offset);
+}
+
+static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ void *value, const u32 length)
+{
+ memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
+}
+
+static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ u32 value)
+{
+ writel(value, rt2x00dev->csr.base + offset);
+}
+
+static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const void *value,
+ const u32 length)
+{
+ __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
+}
+
+/**
+ * rt2x00pci_regbusy_read - Read from register with busy check
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ * @offset: Register offset
+ * @field: Field to check if register is busy
+ * @reg: Pointer to where register contents should be stored
+ *
+ * This function will read the given register, and checks if the
+ * register is busy. If it is, it will sleep for a couple of
+ * microseconds before reading the register again. If the register
+ * is not read after a certain timeout, this function will return
+ * FALSE.
+ */
+int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev,
+ const unsigned int offset,
+ const struct rt2x00_field32 field,
+ u32 *reg);
+
+/**
+ * struct queue_entry_priv_pci: Per entry PCI specific information
+ *
+ * @desc: Pointer to device descriptor
+ * @desc_dma: DMA pointer to &desc.
+ * @data: Pointer to device's entry memory.
+ * @data_dma: DMA pointer to &data.
+ */
+struct queue_entry_priv_pci {
+ __le32 *desc;
+ dma_addr_t desc_dma;
+};
+
+/**
+ * rt2x00pci_rxdone - Handle RX done events
+ * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
+ *
+ * Returns true if there are still rx frames pending and false if all
+ * pending rx frames were processed.
+ */
+bool rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev);
+
+/**
+ * rt2x00pci_flush_queue - Flush data queue
+ * @queue: Data queue to stop
+ * @drop: True to drop all pending frames.
+ *
+ * This will wait for a maximum of 100ms, waiting for the queues
+ * to become empty.
+ */
+void rt2x00pci_flush_queue(struct data_queue *queue, bool drop);
+
+/*
+ * Device initialization handlers.
+ */
+int rt2x00pci_initialize(struct rt2x00_dev *rt2x00dev);
+void rt2x00pci_uninitialize(struct rt2x00_dev *rt2x00dev);
+
/*
* PCI driver handlers.
*/
diff --git a/trunk/drivers/net/wireless/rt2x00/rt61pci.c b/trunk/drivers/net/wireless/rt2x00/rt61pci.c
index 9e3c8ff53e3f..f95792cfcf89 100644
--- a/trunk/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/trunk/drivers/net/wireless/rt2x00/rt61pci.c
@@ -35,7 +35,6 @@
#include
#include "rt2x00.h"
-#include "rt2x00mmio.h"
#include "rt2x00pci.h"
#include "rt61pci.h"
diff --git a/trunk/drivers/nfc/microread/mei.c b/trunk/drivers/nfc/microread/mei.c
index ca33ae193935..eef38cfd812e 100644
--- a/trunk/drivers/nfc/microread/mei.c
+++ b/trunk/drivers/nfc/microread/mei.c
@@ -22,7 +22,7 @@
#include
#include
#include
-#include
+#include
#include
#include
@@ -32,6 +32,9 @@
#define MICROREAD_DRIVER_NAME "microread"
+#define MICROREAD_UUID UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50, 0x94, \
+ 0xd4, 0x50, 0x26, 0x67, 0x23, 0x77, 0x5c)
+
struct mei_nfc_hdr {
u8 cmd;
u8 status;
@@ -45,7 +48,7 @@ struct mei_nfc_hdr {
#define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD)
struct microread_mei_phy {
- struct mei_cl_device *device;
+ struct mei_device *mei_device;
struct nfc_hci_dev *hdev;
int powered;
@@ -102,14 +105,14 @@ static int microread_mei_write(void *phy_id, struct sk_buff *skb)
MEI_DUMP_SKB_OUT("mei frame sent", skb);
- r = mei_cl_send(phy->device, skb->data, skb->len);
+ r = mei_send(phy->device, skb->data, skb->len);
if (r > 0)
r = 0;
return r;
}
-static void microread_event_cb(struct mei_cl_device *device, u32 events,
+static void microread_event_cb(struct mei_device *device, u32 events,
void *context)
{
struct microread_mei_phy *phy = context;
@@ -117,7 +120,7 @@ static void microread_event_cb(struct mei_cl_device *device, u32 events,
if (phy->hard_fault != 0)
return;
- if (events & BIT(MEI_CL_EVENT_RX)) {
+ if (events & BIT(MEI_EVENT_RX)) {
struct sk_buff *skb;
int reply_size;
@@ -125,7 +128,7 @@ static void microread_event_cb(struct mei_cl_device *device, u32 events,
if (!skb)
return;
- reply_size = mei_cl_recv(device, skb->data, MEI_NFC_MAX_READ);
+ reply_size = mei_recv(device, skb->data, MEI_NFC_MAX_READ);
if (reply_size < MEI_NFC_HEADER_SIZE) {
kfree(skb);
return;
@@ -146,8 +149,8 @@ static struct nfc_phy_ops mei_phy_ops = {
.disable = microread_mei_disable,
};
-static int microread_mei_probe(struct mei_cl_device *device,
- const struct mei_cl_device_id *id)
+static int microread_mei_probe(struct mei_device *device,
+ const struct mei_id *id)
{
struct microread_mei_phy *phy;
int r;
@@ -161,9 +164,9 @@ static int microread_mei_probe(struct mei_cl_device *device,
}
phy->device = device;
- mei_cl_set_drvdata(device, phy);
+ mei_set_clientdata(device, phy);
- r = mei_cl_register_event_cb(device, microread_event_cb, phy);
+ r = mei_register_event_cb(device, microread_event_cb, phy);
if (r) {
pr_err(MICROREAD_DRIVER_NAME ": event cb registration failed\n");
goto err_out;
@@ -183,9 +186,9 @@ static int microread_mei_probe(struct mei_cl_device *device,
return r;
}
-static int microread_mei_remove(struct mei_cl_device *device)
+static int microread_mei_remove(struct mei_device *device)
{
- struct microread_mei_phy *phy = mei_cl_get_drvdata(device);
+ struct microread_mei_phy *phy = mei_get_clientdata(device);
pr_info("Removing microread\n");
@@ -199,15 +202,16 @@ static int microread_mei_remove(struct mei_cl_device *device)
return 0;
}
-static struct mei_cl_device_id microread_mei_tbl[] = {
- { MICROREAD_DRIVER_NAME },
+static struct mei_id microread_mei_tbl[] = {
+ { MICROREAD_DRIVER_NAME, MICROREAD_UUID },
/* required last entry */
{ }
};
+
MODULE_DEVICE_TABLE(mei, microread_mei_tbl);
-static struct mei_cl_driver microread_driver = {
+static struct mei_driver microread_driver = {
.id_table = microread_mei_tbl,
.name = MICROREAD_DRIVER_NAME,
@@ -221,7 +225,7 @@ static int microread_mei_init(void)
pr_debug(DRIVER_DESC ": %s\n", __func__);
-	r = mei_cl_driver_register(&microread_driver);
+	r = mei_driver_register(&microread_driver);
if (r) {
pr_err(MICROREAD_DRIVER_NAME ": driver registration failed\n");
return r;
@@ -232,7 +236,7 @@ static int microread_mei_init(void)
static void microread_mei_exit(void)
{
-	mei_cl_driver_unregister(&microread_driver);
+	mei_driver_unregister(&microread_driver);
}
module_init(microread_mei_init);
diff --git a/trunk/drivers/pci/pci-acpi.c b/trunk/drivers/pci/pci-acpi.c
index 5147c210df52..dee5dddaa292 100644
--- a/trunk/drivers/pci/pci-acpi.c
+++ b/trunk/drivers/pci/pci-acpi.c
@@ -53,15 +53,14 @@ static void pci_acpi_wake_dev(acpi_handle handle, u32 event, void *context)
return;
}
- /* Clear PME Status if set. */
- if (pci_dev->pme_support)
- pci_check_pme_status(pci_dev);
+ if (!pci_dev->pm_cap || !pci_dev->pme_support
+ || pci_check_pme_status(pci_dev)) {
+ if (pci_dev->pme_poll)
+ pci_dev->pme_poll = false;
- if (pci_dev->pme_poll)
- pci_dev->pme_poll = false;
-
- pci_wakeup_event(pci_dev);
- pm_runtime_resume(&pci_dev->dev);
+ pci_wakeup_event(pci_dev);
+ pm_runtime_resume(&pci_dev->dev);
+ }
if (pci_dev->subordinate)
pci_pme_wakeup_bus(pci_dev->subordinate);
diff --git a/trunk/drivers/pci/pci-driver.c b/trunk/drivers/pci/pci-driver.c
index 79277fb36c6b..1fa1e482a999 100644
--- a/trunk/drivers/pci/pci-driver.c
+++ b/trunk/drivers/pci/pci-driver.c
@@ -390,10 +390,9 @@ static void pci_device_shutdown(struct device *dev)
/*
* Turn off Bus Master bit on the device to tell it to not
- * continue to do DMA. Don't touch devices in D3cold or unknown states.
+ * continue to do DMA
*/
- if (pci_dev->current_state <= PCI_D3hot)
- pci_clear_master(pci_dev);
+ pci_clear_master(pci_dev);
}
#ifdef CONFIG_PM
diff --git a/trunk/drivers/pci/pcie/portdrv_pci.c b/trunk/drivers/pci/pcie/portdrv_pci.c
index ed4d09498337..08c243ab034e 100644
--- a/trunk/drivers/pci/pcie/portdrv_pci.c
+++ b/trunk/drivers/pci/pcie/portdrv_pci.c
@@ -184,6 +184,14 @@ static const struct dev_pm_ops pcie_portdrv_pm_ops = {
#define PCIE_PORTDRV_PM_OPS NULL
#endif /* !PM */
+/*
+ * PCIe port runtime suspend is broken for some chipsets, so use a
+ * black list to disable runtime PM for these chipsets.
+ */
+static const struct pci_device_id port_runtime_pm_black_list[] = {
+ { /* end: all zeroes */ }
+};
+
/*
* pcie_portdrv_probe - Probe PCI-Express port devices
* @dev: PCI-Express port device being probed
@@ -217,11 +225,16 @@ static int pcie_portdrv_probe(struct pci_dev *dev,
* it by default.
*/
dev->d3cold_allowed = false;
+ if (!pci_match_id(port_runtime_pm_black_list, dev))
+ pm_runtime_put_noidle(&dev->dev);
+
return 0;
}
static void pcie_portdrv_remove(struct pci_dev *dev)
{
+ if (!pci_match_id(port_runtime_pm_black_list, dev))
+ pm_runtime_get_noresume(&dev->dev);
pcie_port_device_remove(dev);
pci_disable_device(dev);
}
diff --git a/trunk/drivers/pci/probe.c b/trunk/drivers/pci/probe.c
index 92be60cf5214..b494066ef32f 100644
--- a/trunk/drivers/pci/probe.c
+++ b/trunk/drivers/pci/probe.c
@@ -988,6 +988,7 @@ int pci_setup_device(struct pci_dev *dev)
dev->sysdata = dev->bus->sysdata;
dev->dev.parent = dev->bus->bridge;
dev->dev.bus = &pci_bus_type;
+ dev->dev.type = &pci_dev_type;
dev->hdr_type = hdr_type & 0x7f;
dev->multifunction = !!(hdr_type & 0x80);
dev->error_state = pci_channel_io_normal;
@@ -1207,7 +1208,6 @@ struct pci_dev *alloc_pci_dev(void)
return NULL;
INIT_LIST_HEAD(&dev->bus_list);
- dev->dev.type = &pci_dev_type;
return dev;
}
diff --git a/trunk/drivers/pci/rom.c b/trunk/drivers/pci/rom.c
index c5d0a08a8747..b41ac7756a4b 100644
--- a/trunk/drivers/pci/rom.c
+++ b/trunk/drivers/pci/rom.c
@@ -100,6 +100,27 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
return min((size_t)(image - rom), size);
}
+static loff_t pci_find_rom(struct pci_dev *pdev, size_t *size)
+{
+ struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
+ loff_t start;
+
+ /* assign the ROM an address if it doesn't have one */
+ if (res->parent == NULL && pci_assign_resource(pdev, PCI_ROM_RESOURCE))
+ return 0;
+ start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
+ *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
+
+ if (*size == 0)
+ return 0;
+
+ /* Enable ROM space decodes */
+ if (pci_enable_rom(pdev))
+ return 0;
+
+ return start;
+}
+
/**
* pci_map_rom - map a PCI ROM to kernel space
* @pdev: pointer to pci device struct
@@ -114,7 +135,7 @@ size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size)
void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
{
struct resource *res = &pdev->resource[PCI_ROM_RESOURCE];
- loff_t start;
+ loff_t start = 0;
void __iomem *rom;
/*
@@ -133,21 +154,21 @@ void __iomem *pci_map_rom(struct pci_dev *pdev, size_t *size)
return (void __iomem *)(unsigned long)
pci_resource_start(pdev, PCI_ROM_RESOURCE);
} else {
- /* assign the ROM an address if it doesn't have one */
- if (res->parent == NULL &&
- pci_assign_resource(pdev,PCI_ROM_RESOURCE))
- return NULL;
- start = pci_resource_start(pdev, PCI_ROM_RESOURCE);
- *size = pci_resource_len(pdev, PCI_ROM_RESOURCE);
- if (*size == 0)
- return NULL;
-
- /* Enable ROM space decodes */
- if (pci_enable_rom(pdev))
- return NULL;
+ start = pci_find_rom(pdev, size);
}
}
+ /*
+ * Some devices may provide ROMs via a source other than the BAR
+ */
+ if (!start && pdev->rom && pdev->romlen) {
+ *size = pdev->romlen;
+ return phys_to_virt(pdev->rom);
+ }
+
+ if (!start)
+ return NULL;
+
rom = ioremap(start, *size);
if (!rom) {
/* restore enable if ioremap fails */
@@ -181,7 +202,8 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
if (res->flags & (IORESOURCE_ROM_COPY | IORESOURCE_ROM_BIOS_COPY))
return;
- iounmap(rom);
+ if (!pdev->rom || !pdev->romlen)
+ iounmap(rom);
/* Disable again before continuing, leave enabled if pci=rom */
if (!(res->flags & (IORESOURCE_ROM_ENABLE | IORESOURCE_ROM_SHADOW)))
@@ -205,24 +227,7 @@ void pci_cleanup_rom(struct pci_dev *pdev)
}
}
-/**
- * pci_platform_rom - provides a pointer to any ROM image provided by the
- * platform
- * @pdev: pointer to pci device struct
- * @size: pointer to receive size of pci window over ROM
- */
-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
-{
- if (pdev->rom && pdev->romlen) {
- *size = pdev->romlen;
- return phys_to_virt((phys_addr_t)pdev->rom);
- }
-
- return NULL;
-}
-
EXPORT_SYMBOL(pci_map_rom);
EXPORT_SYMBOL(pci_unmap_rom);
EXPORT_SYMBOL_GPL(pci_enable_rom);
EXPORT_SYMBOL_GPL(pci_disable_rom);
-EXPORT_SYMBOL(pci_platform_rom);
diff --git a/trunk/drivers/platform/x86/hp-wmi.c b/trunk/drivers/platform/x86/hp-wmi.c
index 1a779bbfb87d..45cacf79f3a7 100644
--- a/trunk/drivers/platform/x86/hp-wmi.c
+++ b/trunk/drivers/platform/x86/hp-wmi.c
@@ -134,6 +134,7 @@ static const struct key_entry hp_wmi_keymap[] = {
{ KE_KEY, 0x2142, { KEY_MEDIA } },
{ KE_KEY, 0x213b, { KEY_INFO } },
{ KE_KEY, 0x2169, { KEY_DIRECTION } },
+ { KE_KEY, 0x216a, { KEY_SETUP } },
{ KE_KEY, 0x231b, { KEY_HELP } },
{ KE_END, 0 }
};
@@ -924,6 +925,9 @@ static int __init hp_wmi_init(void)
err = hp_wmi_input_setup();
if (err)
return err;
+
+ //Enable magic for hotkeys that run on the SMBus
+ ec_write(0xe6,0x6e);
}
if (bios_capable) {
diff --git a/trunk/drivers/platform/x86/thinkpad_acpi.c b/trunk/drivers/platform/x86/thinkpad_acpi.c
index edec135b1685..9a907567f41e 100644
--- a/trunk/drivers/platform/x86/thinkpad_acpi.c
+++ b/trunk/drivers/platform/x86/thinkpad_acpi.c
@@ -1964,6 +1964,9 @@ struct tp_nvram_state {
/* kthread for the hotkey poller */
static struct task_struct *tpacpi_hotkey_task;
+/* Acquired while the poller kthread is running, use to sync start/stop */
+static struct mutex hotkey_thread_mutex;
+
/*
* Acquire mutex to write poller control variables as an
* atomic block.
@@ -2459,6 +2462,8 @@ static int hotkey_kthread(void *data)
unsigned int poll_freq;
bool was_frozen;
+ mutex_lock(&hotkey_thread_mutex);
+
if (tpacpi_lifecycle == TPACPI_LIFE_EXITING)
goto exit;
@@ -2518,6 +2523,7 @@ static int hotkey_kthread(void *data)
}
exit:
+ mutex_unlock(&hotkey_thread_mutex);
return 0;
}
@@ -2527,6 +2533,9 @@ static void hotkey_poll_stop_sync(void)
if (tpacpi_hotkey_task) {
kthread_stop(tpacpi_hotkey_task);
tpacpi_hotkey_task = NULL;
+ mutex_lock(&hotkey_thread_mutex);
+ /* at this point, the thread did exit */
+ mutex_unlock(&hotkey_thread_mutex);
}
}
@@ -3225,6 +3234,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
mutex_init(&hotkey_mutex);
#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
+ mutex_init(&hotkey_thread_mutex);
mutex_init(&hotkey_thread_data_mutex);
#endif
diff --git a/trunk/drivers/pnp/pnpbios/core.c b/trunk/drivers/pnp/pnpbios/core.c
index 9b86a01af631..5d66e5585f97 100644
--- a/trunk/drivers/pnp/pnpbios/core.c
+++ b/trunk/drivers/pnp/pnpbios/core.c
@@ -513,6 +513,10 @@ static int __init pnpbios_init(void)
{
int ret;
+#if defined(CONFIG_PPC)
+ if (check_legacy_ioport(PNPBIOS_BASE))
+ return -ENODEV;
+#endif
if (pnpbios_disabled || dmi_check_system(pnpbios_dmi_table) ||
paravirt_enabled()) {
printk(KERN_INFO "PnPBIOS: Disabled\n");
@@ -566,7 +570,10 @@ fs_initcall(pnpbios_init);
static int __init pnpbios_thread_init(void)
{
struct task_struct *task;
-
+#if defined(CONFIG_PPC)
+ if (check_legacy_ioport(PNPBIOS_BASE))
+ return 0;
+#endif
if (pnpbios_disabled)
return 0;
diff --git a/trunk/drivers/remoteproc/Kconfig b/trunk/drivers/remoteproc/Kconfig
index c6d77e20622c..cc1f7bf53fd0 100644
--- a/trunk/drivers/remoteproc/Kconfig
+++ b/trunk/drivers/remoteproc/Kconfig
@@ -4,7 +4,7 @@ menu "Remoteproc drivers"
config REMOTEPROC
tristate
depends on HAS_DMA
- select FW_LOADER
+ select FW_CONFIG
select VIRTIO
config OMAP_REMOTEPROC
diff --git a/trunk/drivers/remoteproc/remoteproc_core.c b/trunk/drivers/remoteproc/remoteproc_core.c
index 8edb4aed5d36..29387df4bfc9 100644
--- a/trunk/drivers/remoteproc/remoteproc_core.c
+++ b/trunk/drivers/remoteproc/remoteproc_core.c
@@ -217,7 +217,7 @@ int rproc_alloc_vring(struct rproc_vdev *rvdev, int i)
* TODO: support predefined notifyids (via resource table)
*/
ret = idr_alloc(&rproc->notifyids, rvring, 0, 0, GFP_KERNEL);
- if (ret < 0) {
+ if (ret) {
dev_err(dev, "idr_alloc failed: %d\n", ret);
dma_free_coherent(dev->parent, size, va, dma);
return ret;
@@ -366,12 +366,10 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
/* it is now safe to add the virtio device */
ret = rproc_add_virtio_dev(rvdev, rsc->id);
if (ret)
- goto remove_rvdev;
+ goto free_rvdev;
return 0;
-remove_rvdev:
- list_del(&rvdev->node);
free_rvdev:
kfree(rvdev);
return ret;
diff --git a/trunk/drivers/remoteproc/ste_modem_rproc.c b/trunk/drivers/remoteproc/ste_modem_rproc.c
index fb95c4220052..a7743c069339 100644
--- a/trunk/drivers/remoteproc/ste_modem_rproc.c
+++ b/trunk/drivers/remoteproc/ste_modem_rproc.c
@@ -240,8 +240,6 @@ static int sproc_drv_remove(struct platform_device *pdev)
/* Unregister as remoteproc device */
rproc_del(sproc->rproc);
- dma_free_coherent(sproc->rproc->dev.parent, SPROC_FW_SIZE,
- sproc->fw_addr, sproc->fw_dma_addr);
rproc_put(sproc->rproc);
mdev->drv_data = NULL;
@@ -299,13 +297,10 @@ static int sproc_probe(struct platform_device *pdev)
/* Register as a remoteproc device */
err = rproc_add(rproc);
if (err)
- goto free_mem;
+ goto free_rproc;
return 0;
-free_mem:
- dma_free_coherent(rproc->dev.parent, SPROC_FW_SIZE,
- sproc->fw_addr, sproc->fw_dma_addr);
free_rproc:
/* Reset device data upon error */
mdev->drv_data = NULL;
diff --git a/trunk/drivers/rtc/rtc-at91rm9200.c b/trunk/drivers/rtc/rtc-at91rm9200.c
index 434ebc3a99dc..0a9f27e094ea 100644
--- a/trunk/drivers/rtc/rtc-at91rm9200.c
+++ b/trunk/drivers/rtc/rtc-at91rm9200.c
@@ -44,6 +44,7 @@ static DECLARE_COMPLETION(at91_rtc_updated);
static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
static void __iomem *at91_rtc_regs;
static int irq;
+static u32 at91_rtc_imr;
/*
* Decode time/date into rtc_time structure
@@ -108,9 +109,11 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
cr = at91_rtc_read(AT91_RTC_CR);
at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
+ at91_rtc_imr |= AT91_RTC_ACKUPD;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ACKUPD);
wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD);
+ at91_rtc_imr &= ~AT91_RTC_ACKUPD;
at91_rtc_write(AT91_RTC_TIMR,
bin2bcd(tm->tm_sec) << 0
@@ -142,7 +145,7 @@ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
tm->tm_year = at91_alarm_year - 1900;
- alrm->enabled = (at91_rtc_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
+ alrm->enabled = (at91_rtc_imr & AT91_RTC_ALARM)
? 1 : 0;
dev_dbg(dev, "%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
@@ -168,6 +171,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
tm.tm_sec = alrm->time.tm_sec;
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_imr &= ~AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_TIMALR,
bin2bcd(tm.tm_sec) << 0
| bin2bcd(tm.tm_min) << 8
@@ -180,6 +184,7 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
if (alrm->enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_imr |= AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
}
@@ -196,9 +201,12 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
if (enabled) {
at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
+ at91_rtc_imr |= AT91_RTC_ALARM;
at91_rtc_write(AT91_RTC_IER, AT91_RTC_ALARM);
- } else
+ } else {
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ALARM);
+ at91_rtc_imr &= ~AT91_RTC_ALARM;
+ }
return 0;
}
@@ -207,12 +215,10 @@ static int at91_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
*/
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
- unsigned long imr = at91_rtc_read(AT91_RTC_IMR);
-
seq_printf(seq, "update_IRQ\t: %s\n",
- (imr & AT91_RTC_ACKUPD) ? "yes" : "no");
+ (at91_rtc_imr & AT91_RTC_ACKUPD) ? "yes" : "no");
seq_printf(seq, "periodic_IRQ\t: %s\n",
- (imr & AT91_RTC_SECEV) ? "yes" : "no");
+ (at91_rtc_imr & AT91_RTC_SECEV) ? "yes" : "no");
return 0;
}
@@ -227,7 +233,7 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
unsigned int rtsr;
unsigned long events = 0;
- rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_read(AT91_RTC_IMR);
+ rtsr = at91_rtc_read(AT91_RTC_SR) & at91_rtc_imr;
if (rtsr) { /* this interrupt is shared! Is it ours? */
if (rtsr & AT91_RTC_ALARM)
events |= (RTC_AF | RTC_IRQF);
@@ -291,6 +297,7 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
+ at91_rtc_imr = 0;
ret = request_irq(irq, at91_rtc_interrupt,
IRQF_SHARED,
@@ -329,6 +336,7 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
at91_rtc_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
AT91_RTC_SECEV | AT91_RTC_TIMEV |
AT91_RTC_CALEV);
+ at91_rtc_imr = 0;
free_irq(irq, pdev);
rtc_device_unregister(rtc);
@@ -341,31 +349,35 @@ static int __exit at91_rtc_remove(struct platform_device *pdev)
/* AT91RM9200 RTC Power management control */
-static u32 at91_rtc_imr;
+static u32 at91_rtc_bkpimr;
+
static int at91_rtc_suspend(struct device *dev)
{
/* this IRQ is shared with DBGU and other hardware which isn't
* necessarily doing PM like we are...
*/
- at91_rtc_imr = at91_rtc_read(AT91_RTC_IMR)
- & (AT91_RTC_ALARM|AT91_RTC_SECEV);
- if (at91_rtc_imr) {
- if (device_may_wakeup(dev))
+ at91_rtc_bkpimr = at91_rtc_imr & (AT91_RTC_ALARM|AT91_RTC_SECEV);
+ if (at91_rtc_bkpimr) {
+ if (device_may_wakeup(dev)) {
enable_irq_wake(irq);
- else
- at91_rtc_write(AT91_RTC_IDR, at91_rtc_imr);
- }
+ } else {
+ at91_rtc_write(AT91_RTC_IDR, at91_rtc_bkpimr);
+ at91_rtc_imr &= ~at91_rtc_bkpimr;
+ }
+}
return 0;
}
static int at91_rtc_resume(struct device *dev)
{
- if (at91_rtc_imr) {
- if (device_may_wakeup(dev))
+ if (at91_rtc_bkpimr) {
+ if (device_may_wakeup(dev)) {
disable_irq_wake(irq);
- else
- at91_rtc_write(AT91_RTC_IER, at91_rtc_imr);
+ } else {
+ at91_rtc_imr |= at91_rtc_bkpimr;
+ at91_rtc_write(AT91_RTC_IER, at91_rtc_bkpimr);
+ }
}
return 0;
}
diff --git a/trunk/drivers/rtc/rtc-at91rm9200.h b/trunk/drivers/rtc/rtc-at91rm9200.h
index da1945e5f714..5f940b6844cb 100644
--- a/trunk/drivers/rtc/rtc-at91rm9200.h
+++ b/trunk/drivers/rtc/rtc-at91rm9200.h
@@ -64,7 +64,6 @@
#define AT91_RTC_SCCR 0x1c /* Status Clear Command Register */
#define AT91_RTC_IER 0x20 /* Interrupt Enable Register */
#define AT91_RTC_IDR 0x24 /* Interrupt Disable Register */
-#define AT91_RTC_IMR 0x28 /* Interrupt Mask Register */
#define AT91_RTC_VER 0x2c /* Valid Entry Register */
#define AT91_RTC_NVTIM (1 << 0) /* Non valid Time */
diff --git a/trunk/drivers/s390/block/scm_blk.c b/trunk/drivers/s390/block/scm_blk.c
index e9b9c8392832..5ac9c935c151 100644
--- a/trunk/drivers/s390/block/scm_blk.c
+++ b/trunk/drivers/s390/block/scm_blk.c
@@ -307,7 +307,7 @@ static void scm_blk_handle_error(struct scm_request *scmrq)
case EQC_WR_PROHIBIT:
spin_lock_irqsave(&bdev->lock, flags);
if (bdev->state != SCM_WR_PROHIBIT)
- pr_info("%lx: Write access to the SCM increment is suspended\n",
+ pr_info("%lu: Write access to the SCM increment is suspended\n",
(unsigned long) bdev->scmdev->address);
bdev->state = SCM_WR_PROHIBIT;
spin_unlock_irqrestore(&bdev->lock, flags);
@@ -445,7 +445,7 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
spin_lock_irqsave(&bdev->lock, flags);
if (bdev->state == SCM_WR_PROHIBIT)
- pr_info("%lx: Write access to the SCM increment is restored\n",
+ pr_info("%lu: Write access to the SCM increment is restored\n",
(unsigned long) bdev->scmdev->address);
bdev->state = SCM_OPER;
spin_unlock_irqrestore(&bdev->lock, flags);
@@ -463,15 +463,12 @@ static int __init scm_blk_init(void)
goto out;
scm_major = ret;
- ret = scm_alloc_rqs(nr_requests);
- if (ret)
+ if (scm_alloc_rqs(nr_requests))
goto out_unreg;
scm_debug = debug_register("scm_log", 16, 1, 16);
- if (!scm_debug) {
- ret = -ENOMEM;
+ if (!scm_debug)
goto out_free;
- }
debug_register_view(scm_debug, &debug_hex_ascii_view);
debug_set_level(scm_debug, 2);
diff --git a/trunk/drivers/s390/block/scm_drv.c b/trunk/drivers/s390/block/scm_drv.c
index c98cf52d78d1..5f6180d6ff08 100644
--- a/trunk/drivers/s390/block/scm_drv.c
+++ b/trunk/drivers/s390/block/scm_drv.c
@@ -19,7 +19,7 @@ static void scm_notify(struct scm_device *scmdev, enum scm_event event)
switch (event) {
case SCM_CHANGE:
- pr_info("%lx: The capabilities of the SCM increment changed\n",
+ pr_info("%lu: The capabilities of the SCM increment changed\n",
(unsigned long) scmdev->address);
SCM_LOG(2, "State changed");
SCM_LOG_STATE(2, scmdev);
diff --git a/trunk/drivers/s390/char/tty3270.c b/trunk/drivers/s390/char/tty3270.c
index cee69dac3e18..b907dba24025 100644
--- a/trunk/drivers/s390/char/tty3270.c
+++ b/trunk/drivers/s390/char/tty3270.c
@@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
int i, rc;
/* Check if the tty3270 is already there. */
- view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR);
+ view = raw3270_find_view(&tty3270_fn, tty->index);
if (!IS_ERR(view)) {
tp = container_of(view, struct tty3270, view);
tty->driver_data = tp;
@@ -927,16 +927,15 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
tp->inattr = TF_INPUT;
return tty_port_install(&tp->port, driver, tty);
}
- if (tty3270_max_index < tty->index + 1)
- tty3270_max_index = tty->index + 1;
+ if (tty3270_max_index < tty->index)
+ tty3270_max_index = tty->index;
/* Allocate tty3270 structure on first open. */
tp = tty3270_alloc_view();
if (IS_ERR(tp))
return PTR_ERR(tp);
- rc = raw3270_add_view(&tp->view, &tty3270_fn,
- tty->index + RAW3270_FIRSTMINOR);
+ rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index);
if (rc) {
tty3270_free_view(tp);
return rc;
@@ -1847,12 +1846,12 @@ static const struct tty_operations tty3270_ops = {
void tty3270_create_cb(int minor)
{
- tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL);
+ tty_register_device(tty3270_driver, minor, NULL);
}
void tty3270_destroy_cb(int minor)
{
- tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR);
+ tty_unregister_device(tty3270_driver, minor);
}
struct raw3270_notifier tty3270_notifier =
@@ -1885,8 +1884,7 @@ static int __init tty3270_init(void)
driver->driver_name = "tty3270";
driver->name = "3270/tty";
driver->major = IBM_TTY3270_MAJOR;
- driver->minor_start = RAW3270_FIRSTMINOR;
- driver->name_base = RAW3270_FIRSTMINOR;
+ driver->minor_start = 0;
driver->type = TTY_DRIVER_TYPE_SYSTEM;
driver->subtype = SYSTEM_TYPE_TTY;
driver->init_termios = tty_std_termios;
diff --git a/trunk/drivers/s390/net/qeth_core.h b/trunk/drivers/s390/net/qeth_core.h
index 6ccb7457746b..8c0622399fcd 100644
--- a/trunk/drivers/s390/net/qeth_core.h
+++ b/trunk/drivers/s390/net/qeth_core.h
@@ -769,7 +769,6 @@ struct qeth_card {
unsigned long thread_start_mask;
unsigned long thread_allowed_mask;
unsigned long thread_running_mask;
- struct task_struct *recovery_task;
spinlock_t ip_lock;
struct list_head ip_list;
struct list_head *ip_tbd_list;
@@ -863,8 +862,6 @@ extern struct qeth_card_list_struct qeth_core_card_list;
extern struct kmem_cache *qeth_core_header_cache;
extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS];
-void qeth_set_recovery_task(struct qeth_card *);
-void qeth_clear_recovery_task(struct qeth_card *);
void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int);
int qeth_threads_running(struct qeth_card *, unsigned long);
int qeth_wait_for_threads(struct qeth_card *, unsigned long);
diff --git a/trunk/drivers/s390/net/qeth_core_main.c b/trunk/drivers/s390/net/qeth_core_main.c
index 451f92020599..0d73a999983d 100644
--- a/trunk/drivers/s390/net/qeth_core_main.c
+++ b/trunk/drivers/s390/net/qeth_core_main.c
@@ -177,23 +177,6 @@ const char *qeth_get_cardname_short(struct qeth_card *card)
return "n/a";
}
-void qeth_set_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = current;
-}
-EXPORT_SYMBOL_GPL(qeth_set_recovery_task);
-
-void qeth_clear_recovery_task(struct qeth_card *card)
-{
- card->recovery_task = NULL;
-}
-EXPORT_SYMBOL_GPL(qeth_clear_recovery_task);
-
-static bool qeth_is_recovery_task(const struct qeth_card *card)
-{
- return card->recovery_task == current;
-}
-
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
int clear_start_mask)
{
@@ -222,8 +205,6 @@ EXPORT_SYMBOL_GPL(qeth_threads_running);
int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads)
{
- if (qeth_is_recovery_task(card))
- return 0;
return wait_event_interruptible(card->wait_q,
qeth_threads_running(card, threads) == 0);
}
diff --git a/trunk/drivers/s390/net/qeth_l2_main.c b/trunk/drivers/s390/net/qeth_l2_main.c
index 155b101bd730..d690166efeaf 100644
--- a/trunk/drivers/s390/net/qeth_l2_main.c
+++ b/trunk/drivers/s390/net/qeth_l2_main.c
@@ -1143,7 +1143,6 @@ static int qeth_l2_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l2_set_offline(card->gdev, 1);
rc = __qeth_l2_set_online(card->gdev, 1);
if (!rc)
@@ -1154,7 +1153,6 @@ static int qeth_l2_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
diff --git a/trunk/drivers/s390/net/qeth_l3_main.c b/trunk/drivers/s390/net/qeth_l3_main.c
index 1f7edf1b26c3..8710337dab3e 100644
--- a/trunk/drivers/s390/net/qeth_l3_main.c
+++ b/trunk/drivers/s390/net/qeth_l3_main.c
@@ -3515,7 +3515,6 @@ static int qeth_l3_recover(void *ptr)
QETH_CARD_TEXT(card, 2, "recover2");
dev_warn(&card->gdev->dev,
"A recovery process has been started for the device\n");
- qeth_set_recovery_task(card);
__qeth_l3_set_offline(card->gdev, 1);
rc = __qeth_l3_set_online(card->gdev, 1);
if (!rc)
@@ -3526,7 +3525,6 @@ static int qeth_l3_recover(void *ptr)
dev_warn(&card->gdev->dev, "The qeth device driver "
"failed to recover an error on the device\n");
}
- qeth_clear_recovery_task(card);
qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD);
return 0;
diff --git a/trunk/drivers/sbus/char/bbc_i2c.c b/trunk/drivers/sbus/char/bbc_i2c.c
index c1441ed282eb..1a9d1e3ce64c 100644
--- a/trunk/drivers/sbus/char/bbc_i2c.c
+++ b/trunk/drivers/sbus/char/bbc_i2c.c
@@ -282,7 +282,7 @@ static irqreturn_t bbc_i2c_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static void reset_one_i2c(struct bbc_i2c_bus *bp)
+static void __init reset_one_i2c(struct bbc_i2c_bus *bp)
{
writeb(I2C_PCF_PIN, bp->i2c_control_regs + 0x0);
writeb(bp->own, bp->i2c_control_regs + 0x1);
@@ -291,7 +291,7 @@ static void reset_one_i2c(struct bbc_i2c_bus *bp)
writeb(I2C_PCF_IDLE, bp->i2c_control_regs + 0x0);
}
-static struct bbc_i2c_bus * attach_one_i2c(struct platform_device *op, int index)
+static struct bbc_i2c_bus * __init attach_one_i2c(struct platform_device *op, int index)
{
struct bbc_i2c_bus *bp;
struct device_node *dp;
diff --git a/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 90bc7bd00966..2daf4b0da434 100644
--- a/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/trunk/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -940,7 +940,6 @@ static int bnx2fc_libfc_config(struct fc_lport *lport)
fc_exch_init(lport);
fc_rport_init(lport);
fc_disc_init(lport);
- fc_disc_config(lport, lport);
return 0;
}
@@ -2134,7 +2133,6 @@ static int _bnx2fc_create(struct net_device *netdev,
}
ctlr = bnx2fc_to_ctlr(interface);
- cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
interface->vlan_id = vlan_id;
interface->timer_work_queue =
@@ -2145,7 +2143,7 @@ static int _bnx2fc_create(struct net_device *netdev,
goto ifput_err;
}
- lport = bnx2fc_if_create(interface, &cdev->dev, 0);
+ lport = bnx2fc_if_create(interface, &interface->hba->pcidev->dev, 0);
if (!lport) {
printk(KERN_ERR PFX "Failed to create interface (%s)\n",
netdev->name);
@@ -2161,6 +2159,8 @@ static int _bnx2fc_create(struct net_device *netdev,
/* Make this master N_port */
ctlr->lp = lport;
+ cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
+
if (link_state == BNX2FC_CREATE_LINK_UP)
cdev->enabled = FCOE_CTLR_ENABLED;
else
diff --git a/trunk/drivers/scsi/fcoe/fcoe.c b/trunk/drivers/scsi/fcoe/fcoe.c
index 9bfdc9a3f897..b5d92fc93c70 100644
--- a/trunk/drivers/scsi/fcoe/fcoe.c
+++ b/trunk/drivers/scsi/fcoe/fcoe.c
@@ -490,6 +490,7 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
{
struct net_device *netdev = fcoe->netdev;
struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
+ struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
rtnl_lock();
if (!fcoe->removed)
@@ -500,6 +501,7 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
/* tear-down the FCoE controller */
fcoe_ctlr_destroy(fip);
scsi_host_put(fip->lp->host);
+ fcoe_ctlr_device_delete(ctlr_dev);
dev_put(netdev);
module_put(THIS_MODULE);
}
@@ -2192,8 +2194,6 @@ static int fcoe_destroy(struct net_device *netdev)
*/
static void fcoe_destroy_work(struct work_struct *work)
{
- struct fcoe_ctlr_device *cdev;
- struct fcoe_ctlr *ctlr;
struct fcoe_port *port;
struct fcoe_interface *fcoe;
struct Scsi_Host *shost;
@@ -2224,15 +2224,10 @@ static void fcoe_destroy_work(struct work_struct *work)
mutex_lock(&fcoe_config_mutex);
fcoe = port->priv;
- ctlr = fcoe_to_ctlr(fcoe);
- cdev = fcoe_ctlr_to_ctlr_dev(ctlr);
-
fcoe_if_destroy(port->lport);
fcoe_interface_cleanup(fcoe);
mutex_unlock(&fcoe_config_mutex);
-
- fcoe_ctlr_device_delete(cdev);
}
/**
@@ -2340,9 +2335,7 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
rc = -EIO;
rtnl_unlock();
fcoe_interface_cleanup(fcoe);
- mutex_unlock(&fcoe_config_mutex);
- fcoe_ctlr_device_delete(ctlr_dev);
- goto out;
+ goto out_nortnl;
}
/* Make this the "master" N_Port */
@@ -2382,8 +2375,8 @@ static int _fcoe_create(struct net_device *netdev, enum fip_state fip_mode,
out_nodev:
rtnl_unlock();
+out_nortnl:
mutex_unlock(&fcoe_config_mutex);
-out:
return rc;
}
diff --git a/trunk/drivers/scsi/fcoe/fcoe_ctlr.c b/trunk/drivers/scsi/fcoe/fcoe_ctlr.c
index a76247201be5..08c3bc398da2 100644
--- a/trunk/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/trunk/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -2814,47 +2814,6 @@ static void fcoe_ctlr_vn_timeout(struct fcoe_ctlr *fip)
fc_lport_set_local_id(fip->lp, new_port_id);
}
-/**
- * fcoe_ctlr_mode_set() - Set or reset the ctlr's mode
- * @lport: The local port to be (re)configured
- * @fip: The FCoE controller whose mode is changing
- * @fip_mode: The new fip mode
- *
- * Note that the we shouldn't be changing the libfc discovery settings
- * (fc_disc_config) while an lport is going through the libfc state
- * machine. The mode can only be changed when a fcoe_ctlr device is
- * disabled, so that should ensure that this routine is only called
- * when nothing is happening.
- */
-void fcoe_ctlr_mode_set(struct fc_lport *lport, struct fcoe_ctlr *fip,
- enum fip_state fip_mode)
-{
- void *priv;
-
- WARN_ON(lport->state != LPORT_ST_RESET &&
- lport->state != LPORT_ST_DISABLED);
-
- if (fip_mode == FIP_MODE_VN2VN) {
- lport->rport_priv_size = sizeof(struct fcoe_rport);
- lport->point_to_multipoint = 1;
- lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
- lport->tt.disc_start = fcoe_ctlr_disc_start;
- lport->tt.disc_stop = fcoe_ctlr_disc_stop;
- lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
- priv = fip;
- } else {
- lport->rport_priv_size = 0;
- lport->point_to_multipoint = 0;
- lport->tt.disc_recv_req = NULL;
- lport->tt.disc_start = NULL;
- lport->tt.disc_stop = NULL;
- lport->tt.disc_stop_final = NULL;
- priv = lport;
- }
-
- fc_disc_config(lport, priv);
-}
-
/**
* fcoe_libfc_config() - Sets up libfc related properties for local port
* @lport: The local port to configure libfc for
@@ -2874,9 +2833,21 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
fc_exch_init(lport);
fc_elsct_init(lport);
fc_lport_init(lport);
+ if (fip->mode == FIP_MODE_VN2VN)
+ lport->rport_priv_size = sizeof(struct fcoe_rport);
fc_rport_init(lport);
- fc_disc_init(lport);
- fcoe_ctlr_mode_set(lport, fip, fip->mode);
+ if (fip->mode == FIP_MODE_VN2VN) {
+ lport->point_to_multipoint = 1;
+ lport->tt.disc_recv_req = fcoe_ctlr_disc_recv;
+ lport->tt.disc_start = fcoe_ctlr_disc_start;
+ lport->tt.disc_stop = fcoe_ctlr_disc_stop;
+ lport->tt.disc_stop_final = fcoe_ctlr_disc_stop_final;
+ mutex_init(&lport->disc.disc_mutex);
+ INIT_LIST_HEAD(&lport->disc.rports);
+ lport->disc.priv = fip;
+ } else {
+ fc_disc_init(lport);
+ }
return 0;
}
EXPORT_SYMBOL_GPL(fcoe_libfc_config);
@@ -2904,7 +2875,6 @@ EXPORT_SYMBOL(fcoe_fcf_get_selected);
void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
{
struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
- struct fc_lport *lport = ctlr->lp;
mutex_lock(&ctlr->ctlr_mutex);
switch (ctlr_dev->mode) {
@@ -2918,7 +2888,5 @@ void fcoe_ctlr_set_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
}
mutex_unlock(&ctlr->ctlr_mutex);
-
- fcoe_ctlr_mode_set(lport, ctlr, ctlr->mode);
}
EXPORT_SYMBOL(fcoe_ctlr_set_fip_mode);
diff --git a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
index d0fa4b6c551f..a044f593e8b9 100644
--- a/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/trunk/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1899,8 +1899,8 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
sdev->allow_restart = 1;
blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
}
- spin_unlock_irqrestore(shost->host_lock, lock_flags);
scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
+ spin_unlock_irqrestore(shost->host_lock, lock_flags);
return 0;
}
diff --git a/trunk/drivers/scsi/ipr.c b/trunk/drivers/scsi/ipr.c
index 2197b57fb225..f328089a1060 100644
--- a/trunk/drivers/scsi/ipr.c
+++ b/trunk/drivers/scsi/ipr.c
@@ -5148,7 +5148,7 @@ static int ipr_cancel_op(struct scsi_cmnd *scsi_cmd)
ipr_trace;
}
- list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
+ list_add_tail(&ipr_cmd->queue, &hrrq->hrrq_free_q);
if (!ipr_is_naca_model(res))
res->needs_sync_complete = 1;
@@ -9349,10 +9349,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- rc = request_irq(ioa_cfg->vectors_info[0].vec, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
- else
- rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
+ rc = request_irq(pdev->irq, ipr_test_intr, 0, IPR_NAME, ioa_cfg);
if (rc) {
dev_err(&pdev->dev, "Can not assign irq %d\n", pdev->irq);
return rc;
@@ -9374,10 +9371,7 @@ static int ipr_test_msi(struct ipr_ioa_cfg *ioa_cfg, struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
- if (ioa_cfg->intr_flag == IPR_USE_MSIX)
- free_irq(ioa_cfg->vectors_info[0].vec, ioa_cfg);
- else
- free_irq(pdev->irq, ioa_cfg);
+ free_irq(pdev->irq, ioa_cfg);
LEAVE;
@@ -9728,7 +9722,6 @@ static void __ipr_remove(struct pci_dev *pdev)
spin_unlock_irqrestore(ioa_cfg->host->host_lock, host_lock_flags);
wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
flush_work(&ioa_cfg->work_q);
- INIT_LIST_HEAD(&ioa_cfg->used_res_q);
spin_lock_irqsave(ioa_cfg->host->host_lock, host_lock_flags);
spin_lock(&ipr_driver_lock);
diff --git a/trunk/drivers/scsi/libfc/fc_disc.c b/trunk/drivers/scsi/libfc/fc_disc.c
index 880a9068ca12..8e561e6a557c 100644
--- a/trunk/drivers/scsi/libfc/fc_disc.c
+++ b/trunk/drivers/scsi/libfc/fc_disc.c
@@ -712,13 +712,12 @@ static void fc_disc_stop_final(struct fc_lport *lport)
}
/**
- * fc_disc_config() - Configure the discovery layer for a local port
- * @lport: The local port that needs the discovery layer to be configured
- * @priv: Private data structre for users of the discovery layer
+ * fc_disc_init() - Initialize the discovery layer for a local port
+ * @lport: The local port that needs the discovery layer to be initialized
*/
-void fc_disc_config(struct fc_lport *lport, void *priv)
+int fc_disc_init(struct fc_lport *lport)
{
- struct fc_disc *disc = &lport->disc;
+ struct fc_disc *disc;
if (!lport->tt.disc_start)
lport->tt.disc_start = fc_disc_start;
@@ -733,21 +732,12 @@ void fc_disc_config(struct fc_lport *lport, void *priv)
lport->tt.disc_recv_req = fc_disc_recv_req;
disc = &lport->disc;
-
- disc->priv = priv;
-}
-EXPORT_SYMBOL(fc_disc_config);
-
-/**
- * fc_disc_init() - Initialize the discovery layer for a local port
- * @lport: The local port that needs the discovery layer to be initialized
- */
-void fc_disc_init(struct fc_lport *lport)
-{
- struct fc_disc *disc = &lport->disc;
-
INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
mutex_init(&disc->disc_mutex);
INIT_LIST_HEAD(&disc->rports);
+
+ disc->priv = lport;
+
+ return 0;
}
EXPORT_SYMBOL(fc_disc_init);
diff --git a/trunk/drivers/scsi/libsas/sas_expander.c b/trunk/drivers/scsi/libsas/sas_expander.c
index 55cbd0180159..aec2e0da5016 100644
--- a/trunk/drivers/scsi/libsas/sas_expander.c
+++ b/trunk/drivers/scsi/libsas/sas_expander.c
@@ -235,17 +235,6 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
linkrate = phy->linkrate;
memcpy(sas_addr, phy->attached_sas_addr, SAS_ADDR_SIZE);
- /* Handle vacant phy - rest of dr data is not valid so skip it */
- if (phy->phy_state == PHY_VACANT) {
- memset(phy->attached_sas_addr, 0, SAS_ADDR_SIZE);
- phy->attached_dev_type = NO_DEVICE;
- if (!test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
- phy->phy_id = phy_id;
- goto skip;
- } else
- goto out;
- }
-
phy->attached_dev_type = to_dev_type(dr);
if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state))
goto out;
@@ -283,7 +272,6 @@ static void sas_set_ex_phy(struct domain_device *dev, int phy_id, void *rsp)
phy->phy->maximum_linkrate = dr->pmax_linkrate;
phy->phy->negotiated_linkrate = phy->linkrate;
- skip:
if (new_phy)
if (sas_phy_add(phy->phy)) {
sas_phy_free(phy->phy);
@@ -400,7 +388,7 @@ int sas_ex_phy_discover(struct domain_device *dev, int single)
if (!disc_req)
return -ENOMEM;
- disc_resp = alloc_smp_resp(DISCOVER_RESP_SIZE);
+ disc_resp = alloc_smp_req(DISCOVER_RESP_SIZE);
if (!disc_resp) {
kfree(disc_req);
return -ENOMEM;
diff --git a/trunk/drivers/scsi/lpfc/lpfc_sli.c b/trunk/drivers/scsi/lpfc/lpfc_sli.c
index d43faf34c1e2..74b67d98e952 100644
--- a/trunk/drivers/scsi/lpfc/lpfc_sli.c
+++ b/trunk/drivers/scsi/lpfc/lpfc_sli.c
@@ -438,12 +438,11 @@ lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
struct lpfc_rqe *temp_hrqe;
struct lpfc_rqe *temp_drqe;
struct lpfc_register doorbell;
- int put_index;
+ int put_index = hq->host_index;
/* sanity check on queue memory */
if (unlikely(!hq) || unlikely(!dq))
return -ENOMEM;
- put_index = hq->host_index;
temp_hrqe = hq->qe[hq->host_index].rqe;
temp_drqe = dq->qe[dq->host_index].rqe;
diff --git a/trunk/drivers/scsi/qla2xxx/qla_attr.c b/trunk/drivers/scsi/qla2xxx/qla_attr.c
index b3db9dcc2619..1d82eef4e1eb 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_attr.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_attr.c
@@ -1938,6 +1938,11 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
"Timer for the VP[%d] has stopped\n", vha->vp_idx);
}
+ /* No pending activities shall be there on the vha now */
+ if (ql2xextended_error_logging & ql_dbg_user)
+ msleep(random32()%10); /* Just to see if something falls on
+ * the net we have placed below */
+
BUG_ON(atomic_read(&vha->vref_count));
qla2x00_free_fcports(vha);
diff --git a/trunk/drivers/scsi/qla2xxx/qla_dbg.c b/trunk/drivers/scsi/qla2xxx/qla_dbg.c
index fbc305f1c87c..1626de52e32a 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_dbg.c
@@ -15,7 +15,6 @@
* | Mailbox commands | 0x115b | 0x111a-0x111b |
* | | | 0x112c-0x112e |
* | | | 0x113a |
- * | | | 0x1155-0x1158 |
* | Device Discovery | 0x2087 | 0x2020-0x2022, |
* | | | 0x2016 |
* | Queue Command and IO tracing | 0x3031 | 0x3006-0x300b |
@@ -402,7 +401,7 @@ qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
void *ring;
} aq, *aqp;
- if (!ha->tgt.atio_ring)
+ if (!ha->tgt.atio_q_length)
return ptr;
num_queues = 1;
diff --git a/trunk/drivers/scsi/qla2xxx/qla_def.h b/trunk/drivers/scsi/qla2xxx/qla_def.h
index 65c5ff75936b..c6509911772b 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_def.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_def.h
@@ -863,6 +863,7 @@ typedef struct {
#define MBX_1 BIT_1
#define MBX_0 BIT_0
+#define RNID_TYPE_SET_VERSION 0x9
#define RNID_TYPE_ASIC_TEMP 0xC
/*
diff --git a/trunk/drivers/scsi/qla2xxx/qla_gbl.h b/trunk/drivers/scsi/qla2xxx/qla_gbl.h
index b310fa97b545..eb3ca21a7f17 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_gbl.h
@@ -357,6 +357,9 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *, dma_addr_t, uint16_t , uint16_t *,
extern int
qla2x00_disable_fce_trace(scsi_qla_host_t *, uint64_t *, uint64_t *);
+extern int
+qla2x00_set_driver_version(scsi_qla_host_t *, char *);
+
extern int
qla2x00_read_sfp(scsi_qla_host_t *, dma_addr_t, uint8_t *,
uint16_t, uint16_t, uint16_t, uint16_t);
diff --git a/trunk/drivers/scsi/qla2xxx/qla_init.c b/trunk/drivers/scsi/qla2xxx/qla_init.c
index b59203393cb2..edf4d14a1335 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_init.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_init.c
@@ -619,6 +619,8 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha))
qla24xx_read_fcp_prio_cfg(vha);
+ qla2x00_set_driver_version(vha, QLA2XXX_VERSION);
+
return (rval);
}
@@ -1397,7 +1399,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
mq_size += ha->max_rsp_queues *
(rsp->length * sizeof(response_t));
}
- if (ha->tgt.atio_ring)
+ if (ha->tgt.atio_q_length)
mq_size += ha->tgt.atio_q_length * sizeof(request_t);
/* Allocate memory for Fibre Channel Event Buffer. */
if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
diff --git a/trunk/drivers/scsi/qla2xxx/qla_mbx.c b/trunk/drivers/scsi/qla2xxx/qla_mbx.c
index 43345af56431..186dd59ce4fa 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/trunk/drivers/scsi/qla2xxx/qla_mbx.c
@@ -3866,6 +3866,64 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
return rval;
}
+int
+qla2x00_set_driver_version(scsi_qla_host_t *vha, char *version)
+{
+ int rval;
+ mbx_cmd_t mc;
+ mbx_cmd_t *mcp = &mc;
+ int len;
+ uint16_t dwlen;
+ uint8_t *str;
+ dma_addr_t str_dma;
+ struct qla_hw_data *ha = vha->hw;
+
+ if (!IS_FWI2_CAPABLE(ha) || IS_QLA82XX(ha))
+ return QLA_FUNCTION_FAILED;
+
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1155,
+ "Entered %s.\n", __func__);
+
+ str = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &str_dma);
+ if (!str) {
+ ql_log(ql_log_warn, vha, 0x1156,
+ "Failed to allocate driver version param.\n");
+ return QLA_MEMORY_ALLOC_FAILED;
+ }
+
+ memcpy(str, "\x7\x3\x11\x0", 4);
+ dwlen = str[0];
+ len = dwlen * sizeof(uint32_t) - 4;
+ memset(str + 4, 0, len);
+ if (len > strlen(version))
+ len = strlen(version);
+ memcpy(str + 4, version, len);
+
+ mcp->mb[0] = MBC_SET_RNID_PARAMS;
+ mcp->mb[1] = RNID_TYPE_SET_VERSION << 8 | dwlen;
+ mcp->mb[2] = MSW(LSD(str_dma));
+ mcp->mb[3] = LSW(LSD(str_dma));
+ mcp->mb[6] = MSW(MSD(str_dma));
+ mcp->mb[7] = LSW(MSD(str_dma));
+ mcp->out_mb = MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+ mcp->in_mb = MBX_0;
+ mcp->tov = MBX_TOV_SECONDS;
+ mcp->flags = 0;
+ rval = qla2x00_mailbox_command(vha, mcp);
+
+ if (rval != QLA_SUCCESS) {
+ ql_dbg(ql_dbg_mbx, vha, 0x1157,
+ "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
+ } else {
+ ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1158,
+ "Done %s.\n", __func__);
+ }
+
+ dma_pool_free(ha->s_dma_pool, str, str_dma);
+
+ return rval;
+}
+
static int
qla2x00_read_asic_temperature(scsi_qla_host_t *vha, uint16_t *temp)
{
diff --git a/trunk/drivers/scsi/qla2xxx/qla_version.h b/trunk/drivers/scsi/qla2xxx/qla_version.h
index ec54036d1e12..2b6e478d9e33 100644
--- a/trunk/drivers/scsi/qla2xxx/qla_version.h
+++ b/trunk/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
/*
* Driver version
*/
-#define QLA2XXX_VERSION "8.04.00.13-k"
+#define QLA2XXX_VERSION "8.04.00.08-k"
#define QLA_DRIVER_MAJOR_VER 8
#define QLA_DRIVER_MINOR_VER 4
diff --git a/trunk/drivers/scsi/st.c b/trunk/drivers/scsi/st.c
index 2a32036a9404..86974471af68 100644
--- a/trunk/drivers/scsi/st.c
+++ b/trunk/drivers/scsi/st.c
@@ -4112,10 +4112,6 @@ static int st_probe(struct device *dev)
tpnt->disk = disk;
disk->private_data = &tpnt->driver;
disk->queue = SDp->request_queue;
- /* SCSI tape doesn't register this gendisk via add_disk(). Manually
- * take queue reference that release_disk() expects. */
- if (!blk_get_queue(disk->queue))
- goto out_put_disk;
tpnt->driver = &st_template;
tpnt->device = SDp;
@@ -4189,7 +4185,7 @@ static int st_probe(struct device *dev)
idr_preload_end();
if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
- goto out_put_queue;
+ goto out_put_disk;
}
tpnt->index = error;
sprintf(disk->disk_name, "st%d", tpnt->index);
@@ -4215,8 +4211,6 @@ static int st_probe(struct device *dev)
spin_lock(&st_index_lock);
idr_remove(&st_index_idr, tpnt->index);
spin_unlock(&st_index_lock);
-out_put_queue:
- blk_put_queue(disk->queue);
out_put_disk:
put_disk(disk);
kfree(tpnt);
diff --git a/trunk/drivers/spi/Kconfig b/trunk/drivers/spi/Kconfig
index 2be0de920d67..f80eee74a311 100644
--- a/trunk/drivers/spi/Kconfig
+++ b/trunk/drivers/spi/Kconfig
@@ -55,7 +55,6 @@ comment "SPI Master Controller Drivers"
config SPI_ALTERA
tristate "Altera SPI Controller"
- depends on GENERIC_HARDIRQS
select SPI_BITBANG
help
This is the driver for the Altera SPI Controller.
@@ -311,7 +310,7 @@ config SPI_PXA2XX_DMA
config SPI_PXA2XX
tristate "PXA2xx SSP SPI master"
- depends on (ARCH_PXA || PCI || ACPI) && GENERIC_HARDIRQS
+ depends on ARCH_PXA || PCI || ACPI
select PXA_SSP if ARCH_PXA
help
This enables using a PXA2xx or Sodaville SSP port as a SPI master
diff --git a/trunk/drivers/spi/spi-bcm63xx.c b/trunk/drivers/spi/spi-bcm63xx.c
index d7df435d962e..9578af782a77 100644
--- a/trunk/drivers/spi/spi-bcm63xx.c
+++ b/trunk/drivers/spi/spi-bcm63xx.c
@@ -152,6 +152,7 @@ static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
static int bcm63xx_spi_setup(struct spi_device *spi)
{
struct bcm63xx_spi *bs;
+ int ret;
bs = spi_master_get_devdata(spi->master);
@@ -489,7 +490,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
default:
dev_err(dev, "unsupported MSG_CTL width: %d\n",
bs->msg_ctl_width);
- goto out_err;
+ goto out_clk_disable;
}
/* Initialize hardware */
diff --git a/trunk/drivers/spi/spi-mpc512x-psc.c b/trunk/drivers/spi/spi-mpc512x-psc.c
index 3e490ee7f275..89480b281d74 100644
--- a/trunk/drivers/spi/spi-mpc512x-psc.c
+++ b/trunk/drivers/spi/spi-mpc512x-psc.c
@@ -164,7 +164,7 @@ static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
for (i = count; i > 0; i--) {
data = tx_buf ? *tx_buf++ : 0;
- if (len == EOFBYTE && t->cs_change)
+ if (len == EOFBYTE)
setbits32(&fifo->txcmd, MPC512x_PSC_FIFO_EOF);
out_8(&fifo->txdata_8, data);
len--;
diff --git a/trunk/drivers/spi/spi-pxa2xx.c b/trunk/drivers/spi/spi-pxa2xx.c
index 810413883c79..90b27a3508a6 100644
--- a/trunk/drivers/spi/spi-pxa2xx.c
+++ b/trunk/drivers/spi/spi-pxa2xx.c
@@ -1168,6 +1168,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
master->dev.parent = &pdev->dev;
master->dev.of_node = pdev->dev.of_node;
+ ACPI_HANDLE_SET(&master->dev, ACPI_HANDLE(&pdev->dev));
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
diff --git a/trunk/drivers/spi/spi-s3c64xx.c b/trunk/drivers/spi/spi-s3c64xx.c
index 4188b2faac5c..e862ab8853aa 100644
--- a/trunk/drivers/spi/spi-s3c64xx.c
+++ b/trunk/drivers/spi/spi-s3c64xx.c
@@ -994,30 +994,25 @@ static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
{
struct s3c64xx_spi_driver_data *sdd = data;
struct spi_master *spi = sdd->master;
- unsigned int val, clr = 0;
+ unsigned int val;
- val = readl(sdd->regs + S3C64XX_SPI_STATUS);
+ val = readl(sdd->regs + S3C64XX_SPI_PENDING_CLR);
- if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
- clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
+ val &= S3C64XX_SPI_PND_RX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
+ S3C64XX_SPI_PND_TX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+
+ writel(val, sdd->regs + S3C64XX_SPI_PENDING_CLR);
+
+ if (val & S3C64XX_SPI_PND_RX_OVERRUN_CLR)
dev_err(&spi->dev, "RX overrun\n");
- }
- if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
- clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
+ if (val & S3C64XX_SPI_PND_RX_UNDERRUN_CLR)
dev_err(&spi->dev, "RX underrun\n");
- }
- if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
- clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
+ if (val & S3C64XX_SPI_PND_TX_OVERRUN_CLR)
dev_err(&spi->dev, "TX overrun\n");
- }
- if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
- clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+ if (val & S3C64XX_SPI_PND_TX_UNDERRUN_CLR)
dev_err(&spi->dev, "TX underrun\n");
- }
-
- /* Clear the pending irq by setting and then clearing it */
- writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
- writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
return IRQ_HANDLED;
}
@@ -1041,13 +1036,9 @@ static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
writel(0, regs + S3C64XX_SPI_MODE_CFG);
writel(0, regs + S3C64XX_SPI_PACKET_CNT);
- /* Clear any irq pending bits, should set and clear the bits */
- val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
- S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
- S3C64XX_SPI_PND_TX_OVERRUN_CLR |
- S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
- writel(val, regs + S3C64XX_SPI_PENDING_CLR);
- writel(0, regs + S3C64XX_SPI_PENDING_CLR);
+ /* Clear any irq pending bits */
+ writel(readl(regs + S3C64XX_SPI_PENDING_CLR),
+ regs + S3C64XX_SPI_PENDING_CLR);
writel(0, regs + S3C64XX_SPI_SWAP_CFG);
diff --git a/trunk/drivers/spi/spi-tegra20-slink.c b/trunk/drivers/spi/spi-tegra20-slink.c
index a829563f4713..b8698b389ef3 100644
--- a/trunk/drivers/spi/spi-tegra20-slink.c
+++ b/trunk/drivers/spi/spi-tegra20-slink.c
@@ -858,6 +858,21 @@ static int tegra_slink_setup(struct spi_device *spi)
return 0;
}
+static int tegra_slink_prepare_transfer(struct spi_master *master)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ return pm_runtime_get_sync(tspi->dev);
+}
+
+static int tegra_slink_unprepare_transfer(struct spi_master *master)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
static int tegra_slink_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -870,12 +885,6 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,
msg->status = 0;
msg->actual_length = 0;
- ret = pm_runtime_get_sync(tspi->dev);
- if (ret < 0) {
- dev_err(tspi->dev, "runtime get failed: %d\n", ret);
- goto done;
- }
-
single_xfer = list_is_singular(&msg->transfers);
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
INIT_COMPLETION(tspi->xfer_completion);
@@ -912,8 +921,6 @@ static int tegra_slink_transfer_one_message(struct spi_master *master,
exit:
tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
- pm_runtime_put(tspi->dev);
-done:
msg->status = ret;
spi_finalize_current_message(master);
return ret;
@@ -1141,7 +1148,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tegra_slink_setup;
+ master->prepare_transfer_hardware = tegra_slink_prepare_transfer;
master->transfer_one_message = tegra_slink_transfer_one_message;
+ master->unprepare_transfer_hardware = tegra_slink_unprepare_transfer;
master->num_chipselect = MAX_CHIP_SELECT;
master->bus_num = -1;
diff --git a/trunk/drivers/spi/spi.c b/trunk/drivers/spi/spi.c
index 004b10f184d4..f996c600eb8c 100644
--- a/trunk/drivers/spi/spi.c
+++ b/trunk/drivers/spi/spi.c
@@ -543,16 +543,17 @@ static void spi_pump_messages(struct kthread_work *work)
/* Lock queue and check for queue work */
spin_lock_irqsave(&master->queue_lock, flags);
if (list_empty(&master->queue) || !master->running) {
- if (!master->busy) {
- spin_unlock_irqrestore(&master->queue_lock, flags);
- return;
+ if (master->busy && master->unprepare_transfer_hardware) {
+ ret = master->unprepare_transfer_hardware(master);
+ if (ret) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ dev_err(&master->dev,
+ "failed to unprepare transfer hardware\n");
+ return;
+ }
}
master->busy = false;
spin_unlock_irqrestore(&master->queue_lock, flags);
- if (master->unprepare_transfer_hardware &&
- master->unprepare_transfer_hardware(master))
- dev_err(&master->dev,
- "failed to unprepare transfer hardware\n");
return;
}
@@ -983,7 +984,7 @@ static void acpi_register_spi_devices(struct spi_master *master)
acpi_status status;
acpi_handle handle;
- handle = ACPI_HANDLE(master->dev.parent);
+ handle = ACPI_HANDLE(&master->dev);
if (!handle)
return;
diff --git a/trunk/drivers/ssb/driver_chipcommon_pmu.c b/trunk/drivers/ssb/driver_chipcommon_pmu.c
index 7b0bce936762..4c0f6d883dd3 100644
--- a/trunk/drivers/ssb/driver_chipcommon_pmu.c
+++ b/trunk/drivers/ssb/driver_chipcommon_pmu.c
@@ -675,32 +675,3 @@ u32 ssb_pmu_get_controlclock(struct ssb_chipcommon *cc)
return 0;
}
}
-
-void ssb_pmu_spuravoid_pllupdate(struct ssb_chipcommon *cc, int spuravoid)
-{
- u32 pmu_ctl = 0;
-
- switch (cc->dev->bus->chip_id) {
- case 0x4322:
- ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL0, 0x11100070);
- ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL1, 0x1014140a);
- ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL5, 0x88888854);
- if (spuravoid == 1)
- ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05201828);
- else
- ssb_chipco_pll_write(cc, SSB_PMU1_PLLCTL2, 0x05001828);
- pmu_ctl = SSB_CHIPCO_PMU_CTL_PLL_UPD;
- break;
- case 43222:
- /* TODO: BCM43222 requires updating PLLs too */
- return;
- default:
- ssb_printk(KERN_ERR PFX
- "Unknown spuravoidance settings for chip 0x%04X, not changing PLL\n",
- cc->dev->bus->chip_id);
- return;
- }
-
- chipco_set32(cc, SSB_CHIPCO_PMU_CTL, pmu_ctl);
-}
-EXPORT_SYMBOL_GPL(ssb_pmu_spuravoid_pllupdate);
diff --git a/trunk/drivers/target/target_core_alua.c b/trunk/drivers/target/target_core_alua.c
index cbe48ab41745..ff1c5ee352cb 100644
--- a/trunk/drivers/target/target_core_alua.c
+++ b/trunk/drivers/target/target_core_alua.c
@@ -409,7 +409,6 @@ static inline int core_alua_state_standby(
case REPORT_LUNS:
case RECEIVE_DIAGNOSTIC:
case SEND_DIAGNOSTIC:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
@@ -452,7 +451,6 @@ static inline int core_alua_state_unavailable(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
@@ -493,7 +491,6 @@ static inline int core_alua_state_transition(
switch (cdb[0]) {
case INQUIRY:
case REPORT_LUNS:
- return 0;
case MAINTENANCE_IN:
switch (cdb[1] & 0x1f) {
case MI_REPORT_TARGET_PGS:
diff --git a/trunk/drivers/tty/mxser.c b/trunk/drivers/tty/mxser.c
index 302909ccf183..484b6a3c9b03 100644
--- a/trunk/drivers/tty/mxser.c
+++ b/trunk/drivers/tty/mxser.c
@@ -2643,9 +2643,9 @@ static int mxser_probe(struct pci_dev *pdev,
mxvar_sdriver, brd->idx + i, &pdev->dev);
if (IS_ERR(tty_dev)) {
retval = PTR_ERR(tty_dev);
- for (; i > 0; i--)
+ for (i--; i >= 0; i--)
tty_unregister_device(mxvar_sdriver,
- brd->idx + i - 1);
+ brd->idx + i);
goto err_relbrd;
}
}
@@ -2751,9 +2751,9 @@ static int __init mxser_module_init(void)
tty_dev = tty_port_register_device(&brd->ports[i].port,
mxvar_sdriver, brd->idx + i, NULL);
if (IS_ERR(tty_dev)) {
- for (; i > 0; i--)
+ for (i--; i >= 0; i--)
tty_unregister_device(mxvar_sdriver,
- brd->idx + i - 1);
+ brd->idx + i);
for (i = 0; i < brd->info->nports; i++)
tty_port_destroy(&brd->ports[i].port);
free_irq(brd->irq, brd);
diff --git a/trunk/drivers/tty/serial/8250/8250_pnp.c b/trunk/drivers/tty/serial/8250/8250_pnp.c
index 35d9ab95c5cb..b3455a970a1d 100644
--- a/trunk/drivers/tty/serial/8250/8250_pnp.c
+++ b/trunk/drivers/tty/serial/8250/8250_pnp.c
@@ -429,6 +429,7 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
{
struct uart_8250_port uart;
int ret, line, flags = dev_id->driver_data;
+ struct resource *res = NULL;
if (flags & UNKNOWN_DEV) {
ret = serial_pnp_guess_board(dev);
@@ -439,11 +440,12 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id)
memset(&uart, 0, sizeof(uart));
if (pnp_irq_valid(dev, 0))
uart.port.irq = pnp_irq(dev, 0);
- if ((flags & CIR_PORT) && pnp_port_valid(dev, 2)) {
- uart.port.iobase = pnp_port_start(dev, 2);
- uart.port.iotype = UPIO_PORT;
- } else if (pnp_port_valid(dev, 0)) {
- uart.port.iobase = pnp_port_start(dev, 0);
+ if ((flags & CIR_PORT) && pnp_port_valid(dev, 2))
+ res = pnp_get_resource(dev, IORESOURCE_IO, 2);
+ else if (pnp_port_valid(dev, 0))
+ res = pnp_get_resource(dev, IORESOURCE_IO, 0);
+ if (pnp_resource_enabled(res)) {
+ uart.port.iobase = res->start;
uart.port.iotype = UPIO_PORT;
} else if (pnp_mem_valid(dev, 0)) {
uart.port.mapbase = pnp_mem_start(dev, 0);
diff --git a/trunk/drivers/tty/serial/omap-serial.c b/trunk/drivers/tty/serial/omap-serial.c
index 30d4f7a783cd..4dc41408ecb7 100644
--- a/trunk/drivers/tty/serial/omap-serial.c
+++ b/trunk/drivers/tty/serial/omap-serial.c
@@ -886,17 +886,6 @@ serial_omap_set_termios(struct uart_port *port, struct ktermios *termios,
serial_out(up, UART_MCR, up->mcr | UART_MCR_TCRTLR);
/* FIFO ENABLE, DMA MODE */
- up->scr |= OMAP_UART_SCR_RX_TRIG_GRANU1_MASK;
- /*
- * NOTE: Setting OMAP_UART_SCR_RX_TRIG_GRANU1_MASK
- * sets Enables the granularity of 1 for TRIGGER RX
- * level. Along with setting RX FIFO trigger level
- * to 1 (as noted below, 16 characters) and TLR[3:0]
- * to zero this will result RX FIFO threshold level
- * to 1 character, instead of 16 as noted in comment
- * below.
- */
-
/* Set receive FIFO threshold to 16 characters and
* transmit FIFO threshold to 16 spaces
*/
diff --git a/trunk/drivers/usb/core/port.c b/trunk/drivers/usb/core/port.c
index 65d4e55552c6..797f9d514732 100644
--- a/trunk/drivers/usb/core/port.c
+++ b/trunk/drivers/usb/core/port.c
@@ -67,6 +67,7 @@ static void usb_port_device_release(struct device *dev)
{
struct usb_port *port_dev = to_usb_port(dev);
+ dev_pm_qos_hide_flags(dev);
kfree(port_dev);
}
diff --git a/trunk/drivers/vfio/pci/vfio_pci.c b/trunk/drivers/vfio/pci/vfio_pci.c
index 7abc5c81af2c..8189cb6a86af 100644
--- a/trunk/drivers/vfio/pci/vfio_pci.c
+++ b/trunk/drivers/vfio/pci/vfio_pci.c
@@ -346,7 +346,6 @@ static long vfio_pci_ioctl(void *device_data,
if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
size_t size;
- int max = vfio_pci_get_irq_count(vdev, hdr.index);
if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
size = sizeof(uint8_t);
@@ -356,7 +355,7 @@ static long vfio_pci_ioctl(void *device_data,
return -EINVAL;
if (hdr.argsz - minsz < hdr.count * size ||
- hdr.start >= max || hdr.start + hdr.count > max)
+ hdr.count > vfio_pci_get_irq_count(vdev, hdr.index))
return -EINVAL;
data = memdup_user((void __user *)(arg + minsz),
diff --git a/trunk/drivers/vhost/tcm_vhost.c b/trunk/drivers/vhost/tcm_vhost.c
index 957a0b98a5d9..2968b4934659 100644
--- a/trunk/drivers/vhost/tcm_vhost.c
+++ b/trunk/drivers/vhost/tcm_vhost.c
@@ -74,8 +74,9 @@ enum {
struct vhost_scsi {
/* Protected by vhost_scsi->dev.mutex */
- struct tcm_vhost_tpg **vs_tpg;
+ struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
+ bool vs_endpoint;
struct vhost_dev dev;
struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -578,27 +579,9 @@ static void tcm_vhost_submission_work(struct work_struct *work)
}
}
-static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
- struct vhost_virtqueue *vq, int head, unsigned out)
-{
- struct virtio_scsi_cmd_resp __user *resp;
- struct virtio_scsi_cmd_resp rsp;
- int ret;
-
- memset(&rsp, 0, sizeof(rsp));
- rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
- resp = vq->iov[out].iov_base;
- ret = __copy_to_user(resp, &rsp, sizeof(rsp));
- if (!ret)
- vhost_add_used_and_signal(&vs->dev, vq, head, 0);
- else
- pr_err("Faulted on virtio_scsi_cmd_resp\n");
-}
-
static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
struct vhost_virtqueue *vq)
{
- struct tcm_vhost_tpg **vs_tpg;
struct virtio_scsi_cmd_req v_req;
struct tcm_vhost_tpg *tv_tpg;
struct tcm_vhost_cmd *tv_cmd;
@@ -607,16 +590,8 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
int head, ret;
u8 target;
- /*
- * We can handle the vq only after the endpoint is setup by calling the
- * VHOST_SCSI_SET_ENDPOINT ioctl.
- *
- * TODO: Check that we are running from vhost_worker which acts
- * as read-side critical section for vhost kind of RCU.
- * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
- */
- vs_tpg = rcu_dereference_check(vq->private_data, 1);
- if (!vs_tpg)
+ /* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
+ if (unlikely(!vs->vs_endpoint))
return;
mutex_lock(&vq->mutex);
@@ -686,11 +661,23 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
/* Extract the tpgt */
target = v_req.lun[1];
- tv_tpg = ACCESS_ONCE(vs_tpg[target]);
+ tv_tpg = vs->vs_tpg[target];
/* Target does not exist, fail the request */
if (unlikely(!tv_tpg)) {
- vhost_scsi_send_bad_target(vs, vq, head, out);
+ struct virtio_scsi_cmd_resp __user *resp;
+ struct virtio_scsi_cmd_resp rsp;
+
+ memset(&rsp, 0, sizeof(rsp));
+ rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+ resp = vq->iov[out].iov_base;
+ ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+ if (!ret)
+ vhost_add_used_and_signal(&vs->dev,
+ vq, head, 0);
+ else
+ pr_err("Faulted on virtio_scsi_cmd_resp\n");
+
continue;
}
@@ -703,13 +690,22 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
if (IS_ERR(tv_cmd)) {
vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
PTR_ERR(tv_cmd));
- goto err_cmd;
+ break;
}
pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
": %d\n", tv_cmd, exp_data_len, data_direction);
tv_cmd->tvc_vhost = vs;
tv_cmd->tvc_vq = vq;
+
+ if (unlikely(vq->iov[out].iov_len !=
+ sizeof(struct virtio_scsi_cmd_resp))) {
+ vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
+ " bytes, out: %d, in: %d\n",
+ vq->iov[out].iov_len, out, in);
+ break;
+ }
+
tv_cmd->tvc_resp = vq->iov[out].iov_base;
/*
@@ -729,7 +725,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
scsi_command_size(tv_cmd->tvc_cdb),
TCM_VHOST_MAX_CDB_SIZE);
- goto err_free;
+ break; /* TODO */
}
tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
@@ -742,7 +738,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
data_direction == DMA_TO_DEVICE);
if (unlikely(ret)) {
vq_err(vq, "Failed to map iov to sgl\n");
- goto err_free;
+ break; /* TODO */
}
}
@@ -763,13 +759,6 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
}
mutex_unlock(&vq->mutex);
- return;
-
-err_free:
- vhost_scsi_free_cmd(tv_cmd);
-err_cmd:
- vhost_scsi_send_bad_target(vs, vq, head, out);
- mutex_unlock(&vq->mutex);
}
static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
@@ -791,20 +780,6 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
vhost_scsi_handle_vq(vs, vq);
}
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
- vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
- int i;
-
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
- vhost_scsi_flush_vq(vs, i);
- vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
/*
* Called from vhost_scsi_ioctl() context to walk the list of available
* tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -815,10 +790,8 @@ static int vhost_scsi_set_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- struct tcm_vhost_tpg **vs_tpg;
- struct vhost_virtqueue *vq;
- int index, ret, i, len;
bool match = false;
+ int index, ret;
mutex_lock(&vs->dev.mutex);
/* Verify that ring has been setup correctly. */
@@ -830,15 +803,6 @@ static int vhost_scsi_set_endpoint(
}
}
- len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
- vs_tpg = kzalloc(len, GFP_KERNEL);
- if (!vs_tpg) {
- mutex_unlock(&vs->dev.mutex);
- return -ENOMEM;
- }
- if (vs->vs_tpg)
- memcpy(vs_tpg, vs->vs_tpg, len);
-
mutex_lock(&tcm_vhost_mutex);
list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -853,15 +817,14 @@ static int vhost_scsi_set_endpoint(
tv_tport = tv_tpg->tport;
if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
- if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
+ if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
mutex_unlock(&tv_tpg->tv_tpg_mutex);
mutex_unlock(&tcm_vhost_mutex);
mutex_unlock(&vs->dev.mutex);
- kfree(vs_tpg);
return -EEXIST;
}
tv_tpg->tv_tpg_vhost_count++;
- vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+ vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
smp_mb__after_atomic_inc();
match = true;
}
@@ -872,27 +835,12 @@ static int vhost_scsi_set_endpoint(
if (match) {
memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
sizeof(vs->vs_vhost_wwpn));
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i];
- /* Flushing the vhost_work acts as synchronize_rcu */
- mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, vs_tpg);
- vhost_init_used(vq);
- mutex_unlock(&vq->mutex);
- }
+ vs->vs_endpoint = true;
ret = 0;
} else {
ret = -EEXIST;
}
- /*
- * Act as synchronize_rcu to make sure access to
- * old vs->vs_tpg is finished.
- */
- vhost_scsi_flush(vs);
- kfree(vs->vs_tpg);
- vs->vs_tpg = vs_tpg;
-
mutex_unlock(&vs->dev.mutex);
return ret;
}
@@ -903,8 +851,6 @@ static int vhost_scsi_clear_endpoint(
{
struct tcm_vhost_tport *tv_tport;
struct tcm_vhost_tpg *tv_tpg;
- struct vhost_virtqueue *vq;
- bool match = false;
int index, ret, i;
u8 target;
@@ -916,14 +862,9 @@ static int vhost_scsi_clear_endpoint(
goto err_dev;
}
}
-
- if (!vs->vs_tpg) {
- mutex_unlock(&vs->dev.mutex);
- return 0;
- }
-
for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
target = i;
+
tv_tpg = vs->vs_tpg[target];
if (!tv_tpg)
continue;
@@ -945,27 +886,10 @@ static int vhost_scsi_clear_endpoint(
}
tv_tpg->tv_tpg_vhost_count--;
vs->vs_tpg[target] = NULL;
- match = true;
+ vs->vs_endpoint = false;
mutex_unlock(&tv_tpg->tv_tpg_mutex);
}
- if (match) {
- for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
- vq = &vs->vqs[i];
- /* Flushing the vhost_work acts as synchronize_rcu */
- mutex_lock(&vq->mutex);
- rcu_assign_pointer(vq->private_data, NULL);
- mutex_unlock(&vq->mutex);
- }
- }
- /*
- * Act as synchronize_rcu to make sure access to
- * old vs->vs_tpg is finished.
- */
- vhost_scsi_flush(vs);
- kfree(vs->vs_tpg);
- vs->vs_tpg = NULL;
mutex_unlock(&vs->dev.mutex);
-
return 0;
err_tpg:
@@ -975,24 +899,6 @@ static int vhost_scsi_clear_endpoint(
return ret;
}
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
- if (features & ~VHOST_SCSI_FEATURES)
- return -EOPNOTSUPP;
-
- mutex_lock(&vs->dev.mutex);
- if ((features & (1 << VHOST_F_LOG_ALL)) &&
- !vhost_log_access_ok(&vs->dev)) {
- mutex_unlock(&vs->dev.mutex);
- return -EFAULT;
- }
- vs->dev.acked_features = features;
- smp_wmb();
- vhost_scsi_flush(vs);
- mutex_unlock(&vs->dev.mutex);
- return 0;
-}
-
static int vhost_scsi_open(struct inode *inode, struct file *f)
{
struct vhost_scsi *s;
@@ -1033,6 +939,38 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
return 0;
}
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+ vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+ int i;
+
+ for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+ vhost_scsi_flush_vq(vs, i);
+ vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+ if (features & ~VHOST_SCSI_FEATURES)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&vs->dev.mutex);
+ if ((features & (1 << VHOST_F_LOG_ALL)) &&
+ !vhost_log_access_ok(&vs->dev)) {
+ mutex_unlock(&vs->dev.mutex);
+ return -EFAULT;
+ }
+ vs->dev.acked_features = features;
+ smp_wmb();
+ vhost_scsi_flush(vs);
+ mutex_unlock(&vs->dev.mutex);
+ return 0;
+}
+
static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
unsigned long arg)
{
diff --git a/trunk/drivers/video/cirrusfb.c b/trunk/drivers/video/cirrusfb.c
index 97db3ba8f237..c3dbbe6e3acf 100644
--- a/trunk/drivers/video/cirrusfb.c
+++ b/trunk/drivers/video/cirrusfb.c
@@ -53,6 +53,12 @@
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
+#ifdef CONFIG_PPC_PREP
+#include <asm/machdep.h>
+#define isPReP machine_is(prep)
+#else
+#define isPReP 0
+#endif
#include <video/vga.h>