diff --git a/[refs] b/[refs] index 72ff488bbd9d..40954c7acefc 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 3ddc0e1a7fd2fc222a6c87654af1cf059acdd1ec +refs/heads/master: 2b55d10c46815d9660c0f1bc6044f7019ff384aa diff --git a/trunk/Documentation/device-mapper/dm-raid.txt b/trunk/Documentation/device-mapper/dm-raid.txt index 56fb62b09fc5..728c38c242d6 100644 --- a/trunk/Documentation/device-mapper/dm-raid.txt +++ b/trunk/Documentation/device-mapper/dm-raid.txt @@ -141,4 +141,3 @@ Version History 1.2.0 Handle creation of arrays that contain failed devices. 1.3.0 Added support for RAID 10 1.3.1 Allow device replacement/rebuild for RAID 10 -1.3.2 Fix/improve redundancy checking for RAID10 diff --git a/trunk/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt b/trunk/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt index bc50899e0c81..3a268127b054 100644 --- a/trunk/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt +++ b/trunk/Documentation/devicetree/bindings/pinctrl/atmel,at91-pinctrl.txt @@ -81,8 +81,7 @@ PA31 TXD4 Required properties for pin configuration node: - atmel,pins: 4 integers array, represents a group of pins mux and config setting. The format is atmel,pins = . - The PERIPH 0 means gpio, PERIPH 1 is periph A, PERIPH 2 is periph B... - PIN_BANK 0 is pioA, PIN_BANK 1 is pioB... + The PERIPH 0 means gpio. Bits used for CONFIG: PULL_UP (1 << 0): indicate this pin need a pull up. @@ -127,7 +126,7 @@ pinctrl@fffff400 { pinctrl_dbgu: dbgu-0 { atmel,pins = <1 14 0x1 0x0 /* PB14 periph A */ - 1 15 0x1 0x1>; /* PB15 periph A with pullup */ + 1 15 0x1 0x1>; /* PB15 periph with pullup */ }; }; }; diff --git a/trunk/Documentation/filesystems/f2fs.txt b/trunk/Documentation/filesystems/f2fs.txt index dcf338e62b71..8fbd8b46ee34 100644 --- a/trunk/Documentation/filesystems/f2fs.txt +++ b/trunk/Documentation/filesystems/f2fs.txt @@ -175,9 +175,9 @@ consists of multiple segments as described below. align with the zone size <-| |-> align with the segment size _________________________________________________________________________ - | | | Segment | Node | Segment | | - | Superblock | Checkpoint | Info. | Address | Summary | Main | - | (SB) | (CP) | Table (SIT) | Table (NAT) | Area (SSA) | | + | | | Node | Segment | Segment | | + | Superblock | Checkpoint | Address | Info. | Summary | Main | + | (SB) | (CP) | Table (NAT) | Table (SIT) | Area (SSA) | | |____________|_____2______|______N______|______N______|______N_____|__N___| . . . . @@ -200,14 +200,14 @@ consists of multiple segments as described below. : It contains file system information, bitmaps for valid NAT/SIT sets, orphan inode lists, and summary entries of current active segments. -- Segment Information Table (SIT) - : It contains segment information such as valid block count and bitmap for the - validity of all the blocks. - - Node Address Table (NAT) : It is composed of a block address table for all the node blocks stored in Main area. +- Segment Information Table (SIT) + : It contains segment information such as valid block count and bitmap for the + validity of all the blocks. + - Segment Summary Area (SSA) : It contains summary entries which contains the owner information of all the data and node blocks stored in Main area. @@ -236,13 +236,13 @@ For file system consistency, each CP points to which NAT and SIT copies are valid, as shown as below. +--------+----------+---------+ - | CP | SIT | NAT | + | CP | NAT | SIT | +--------+----------+---------+ . . . . . . . . . . 
. . +-------+-------+--------+--------+--------+--------+ - | CP #0 | CP #1 | SIT #0 | SIT #1 | NAT #0 | NAT #1 | + | CP #0 | CP #1 | NAT #0 | NAT #1 | SIT #0 | SIT #1 | +-------+-------+--------+--------+--------+--------+ | ^ ^ | | | diff --git a/trunk/Documentation/hid/hid-sensor.txt b/trunk/Documentation/hid/hid-sensor.txt old mode 100644 new mode 100755 diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 6c723811c0a0..363e348bff9b 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. real-time workloads. It can also improve energy efficiency for asymmetric multiprocessors. - rcu_nocb_poll [KNL,BOOT] + rcu_nocbs_poll [KNL,BOOT] Rather than requiring that offloaded CPUs (specified by rcu_nocbs= above) explicitly awaken the corresponding "rcuoN" kthreads, diff --git a/trunk/Documentation/virtual/kvm/api.txt b/trunk/Documentation/virtual/kvm/api.txt index c25439a58274..a4df5535996b 100644 --- a/trunk/Documentation/virtual/kvm/api.txt +++ b/trunk/Documentation/virtual/kvm/api.txt @@ -293,7 +293,7 @@ kvm_run' (see below). 4.11 KVM_GET_REGS Capability: basic -Architectures: all except ARM +Architectures: all Type: vcpu ioctl Parameters: struct kvm_regs (out) Returns: 0 on success, -1 on error @@ -314,7 +314,7 @@ struct kvm_regs { 4.12 KVM_SET_REGS Capability: basic -Architectures: all except ARM +Architectures: all Type: vcpu ioctl Parameters: struct kvm_regs (in) Returns: 0 on success, -1 on error @@ -600,7 +600,7 @@ struct kvm_fpu { 4.24 KVM_CREATE_IRQCHIP Capability: KVM_CAP_IRQCHIP -Architectures: x86, ia64, ARM +Architectures: x86, ia64 Type: vm ioctl Parameters: none Returns: 0 on success, -1 on error @@ -608,39 +608,21 @@ Returns: 0 on success, -1 on error Creates an interrupt controller model in the kernel. On x86, creates a virtual ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23 -only go to the IOAPIC. On ia64, a IOSAPIC is created. On ARM, a GIC is -created. +only go to the IOAPIC. On ia64, a IOSAPIC is created. 4.25 KVM_IRQ_LINE Capability: KVM_CAP_IRQCHIP -Architectures: x86, ia64, arm +Architectures: x86, ia64 Type: vm ioctl Parameters: struct kvm_irq_level Returns: 0 on success, -1 on error Sets the level of a GSI input to the interrupt controller model in the kernel. -On some architectures it is required that an interrupt controller model has -been previously created with KVM_CREATE_IRQCHIP. Note that edge-triggered -interrupts require the level to be set to 1 and then back to 0. - -ARM can signal an interrupt either at the CPU level, or at the in-kernel irqchip -(GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for -specific cpus. The irq field is interpreted like this: - -  bits: | 31 ... 24 | 23 ... 16 | 15 ... 0 | - field: | irq_type | vcpu_index | irq_id | - -The irq_type field has the following values: -- irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ -- irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.) - (the vcpu_index field is ignored) -- irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.) - -(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs) - -In both cases, level is used to raise/lower the line. 
+Requires that an interrupt controller model has been previously created with +KVM_CREATE_IRQCHIP. Note that edge-triggered interrupts require the level +to be set to 1 and then back to 0. struct kvm_irq_level { union { @@ -1793,27 +1775,6 @@ registers, find a list below: PPC | KVM_REG_PPC_VPA_DTL | 128 PPC | KVM_REG_PPC_EPCR | 32 -ARM registers are mapped using the lower 32 bits. The upper 16 of that -is the register group type, or coprocessor number: - -ARM core registers have the following id bit patterns: - 0x4002 0000 0010 - -ARM 32-bit CP15 registers have the following id bit patterns: - 0x4002 0000 000F - -ARM 64-bit CP15 registers have the following id bit patterns: - 0x4003 0000 000F - -ARM CCSIDR registers are demultiplexed by CSSELR value: - 0x4002 0000 0011 00 - -ARM 32-bit VFP control registers have the following id bit patterns: - 0x4002 0000 0012 1 - -ARM 64-bit FP registers have the following id bit patterns: - 0x4002 0000 0012 0 - 4.69 KVM_GET_ONE_REG Capability: KVM_CAP_ONE_REG @@ -2166,50 +2127,6 @@ written, then `n_invalid' invalid entries, invalidating any previously valid entries found. -4.77 KVM_ARM_VCPU_INIT - -Capability: basic -Architectures: arm -Type: vcpu ioctl -Parameters: struct struct kvm_vcpu_init (in) -Returns: 0 on success; -1 on error -Errors: -  EINVAL:    the target is unknown, or the combination of features is invalid. -  ENOENT:    a features bit specified is unknown. - -This tells KVM what type of CPU to present to the guest, and what -optional features it should have.  This will cause a reset of the cpu -registers to their initial values.  If this is not called, KVM_RUN will -return ENOEXEC for that vcpu. - -Note that because some registers reflect machine topology, all vcpus -should be created before this ioctl is invoked. - -Possible features: - - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state. - Depends on KVM_CAP_ARM_PSCI. - - -4.78 KVM_GET_REG_LIST - -Capability: basic -Architectures: arm -Type: vcpu ioctl -Parameters: struct kvm_reg_list (in/out) -Returns: 0 on success; -1 on error -Errors: -  E2BIG:     the reg index list is too big to fit in the array specified by -             the user (the number required will be written into n). - -struct kvm_reg_list { - __u64 n; /* number of registers in reg[] */ - __u64 reg[0]; -}; - -This ioctl returns the guest registers that are supported for the -KVM_GET_ONE_REG/KVM_SET_ONE_REG calls. - - 5. The kvm_run structure ------------------------ diff --git a/trunk/Documentation/x86/boot.txt b/trunk/Documentation/x86/boot.txt index e540fd67f767..406d82d5d2bb 100644 --- a/trunk/Documentation/x86/boot.txt +++ b/trunk/Documentation/x86/boot.txt @@ -57,10 +57,6 @@ Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover protocol entry point. -Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields - to struct boot_params for for loading bzImage and ramdisk - above 4G in 64bit. 
- **** MEMORY LAYOUT The traditional memory map for the kernel loader, used for Image or @@ -186,7 +182,7 @@ Offset Proto Name Meaning 0230/4 2.05+ kernel_alignment Physical addr alignment required for kernel 0234/1 2.05+ relocatable_kernel Whether kernel is relocatable or not 0235/1 2.10+ min_alignment Minimum alignment, as a power of two -0236/2 2.12+ xloadflags Boot protocol option flags +0236/2 N/A pad3 Unused 0238/4 2.06+ cmdline_size Maximum size of the kernel command line 023C/4 2.07+ hardware_subarch Hardware subarchitecture 0240/8 2.07+ hardware_subarch_data Subarchitecture-specific data @@ -586,27 +582,6 @@ Protocol: 2.10+ misaligned kernel. Therefore, a loader should typically try each power-of-two alignment from kernel_alignment down to this alignment. -Field name: xloadflags -Type: read -Offset/size: 0x236/2 -Protocol: 2.12+ - - This field is a bitmask. - - Bit 0 (read): XLF_KERNEL_64 - - If 1, this kernel has the legacy 64-bit entry point at 0x200. - - Bit 1 (read): XLF_CAN_BE_LOADED_ABOVE_4G - - If 1, kernel/boot_params/cmdline/ramdisk can be above 4G. - - Bit 2 (read): XLF_EFI_HANDOVER_32 - - If 1, the kernel supports the 32-bit EFI handoff entry point - given at handover_offset. - - Bit 3 (read): XLF_EFI_HANDOVER_64 - - If 1, the kernel supports the 64-bit EFI handoff entry point - given at handover_offset + 0x200. - Field name: cmdline_size Type: read Offset/size: 0x238/4 diff --git a/trunk/Documentation/x86/zero-page.txt b/trunk/Documentation/x86/zero-page.txt index 199f453cb4de..cf5437deda81 100644 --- a/trunk/Documentation/x86/zero-page.txt +++ b/trunk/Documentation/x86/zero-page.txt @@ -19,9 +19,6 @@ Offset Proto Name Meaning 090/010 ALL hd1_info hd1 disk parameter, OBSOLETE!! 0A0/010 ALL sys_desc_table System description table (struct sys_desc_table) 0B0/010 ALL olpc_ofw_header OLPC's OpenFirmware CIF and friends -0C0/004 ALL ext_ramdisk_image ramdisk_image high 32bits -0C4/004 ALL ext_ramdisk_size ramdisk_size high 32bits -0C8/004 ALL ext_cmd_line_ptr cmd_line_ptr high 32bits 140/080 ALL edid_info Video mode setup (struct edid_info) 1C0/020 ALL efi_info EFI 32 information (struct efi_info) 1E0/004 ALL alk_mem_k Alternative mem check, in KB @@ -30,7 +27,6 @@ Offset Proto Name Meaning 1E9/001 ALL eddbuf_entries Number of entries in eddbuf (below) 1EA/001 ALL edd_mbr_sig_buf_entries Number of entries in edd_mbr_sig_buffer (below) -1EF/001 ALL sentinel Used to detect broken bootloaders 290/040 ALL edd_mbr_sig_buffer EDD MBR signatures 2D0/A00 ALL e820_map E820 memory map table (array of struct e820entry) diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index cfceb75af8c8..915564eda145 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -228,7 +228,7 @@ S: Maintained F: drivers/platform/x86/acerhdf.c ACER WMI LAPTOP EXTRAS -M: "Lee, Chun-Yi" +M: Joey Lee L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/acer-wmi.c @@ -648,7 +648,7 @@ F: arch/arm/ ARM SUB-ARCHITECTURES L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) -S: Maintained +S: MAINTAINED F: arch/arm/mach-*/ F: arch/arm/plat-*/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc.git @@ -1351,14 +1351,6 @@ W: http://wireless.kernel.org/en/users/Drivers/ath9k S: Supported F: drivers/net/wireless/ath/ath9k/ -WILOCITY WIL6210 WIRELESS DRIVER -M: Vladimir Kondratiev -L: linux-wireless@vger.kernel.org -L: wil6210@qca.qualcomm.com -S: Supported -W: http://wireless.kernel.org/en/users/Drivers/wil6210 -F: drivers/net/wireless/ath/wil6210/ - CARL9170 
LINUX COMMUNITY WIRELESS DRIVER M: Christian Lamparter L: linux-wireless@vger.kernel.org @@ -1489,7 +1481,7 @@ AVR32 ARCHITECTURE M: Haavard Skinnemoen M: Hans-Christian Egtvedt W: http://www.atmel.com/products/AVR32/ -W: http://mirror.egtvedt.no/avr32linux.org/ +W: http://avr32linux.org/ W: http://avrfreaks.net/ S: Maintained F: arch/avr32/ @@ -1972,9 +1964,9 @@ S: Maintained F: drivers/usb/host/ohci-ep93xx.c CIRRUS LOGIC CS4270 SOUND DRIVER -M: Timur Tabi +M: Timur Tabi L: alsa-devel@alsa-project.org (moderated for non-subscribers) -S: Odd Fixes +S: Supported F: sound/soc/codecs/cs4270* CLEANCACHE API @@ -2966,7 +2958,7 @@ S: Maintained F: drivers/net/ethernet/i825xx/eexpress.* ETHERNET BRIDGE -M: Stephen Hemminger +M: Stephen Hemminger L: bridge@lists.linux-foundation.org L: netdev@vger.kernel.org W: http://www.linuxfoundation.org/en/Net:Bridge @@ -3191,9 +3183,9 @@ F: include/uapi/video/ F: include/uapi/linux/fb.h FREESCALE DIU FRAMEBUFFER DRIVER -M: Timur Tabi +M: Timur Tabi L: linux-fbdev@vger.kernel.org -S: Maintained +S: Supported F: drivers/video/fsl-diu-fb.* FREESCALE DMA DRIVER @@ -3228,8 +3220,9 @@ F: drivers/net/ethernet/freescale/fs_enet/ F: include/linux/fs_enet_pd.h FREESCALE QUICC ENGINE LIBRARY +M: Timur Tabi L: linuxppc-dev@lists.ozlabs.org -S: Orphan +S: Supported F: arch/powerpc/sysdev/qe_lib/ F: arch/powerpc/include/asm/*qe.h @@ -3248,16 +3241,16 @@ S: Maintained F: drivers/net/ethernet/freescale/ucc_geth* FREESCALE QUICC ENGINE UCC UART DRIVER -M: Timur Tabi +M: Timur Tabi L: linuxppc-dev@lists.ozlabs.org -S: Maintained +S: Supported F: drivers/tty/serial/ucc_uart.c FREESCALE SOC SOUND DRIVERS -M: Timur Tabi +M: Timur Tabi L: alsa-devel@alsa-project.org (moderated for non-subscribers) L: linuxppc-dev@lists.ozlabs.org -S: Maintained +S: Supported F: sound/soc/fsl/fsl* F: sound/soc/fsl/mpc8610_hpcd.c @@ -4481,15 +4474,6 @@ F: arch/s390/include/asm/kvm* F: arch/s390/kvm/ F: drivers/s390/kvm/ -KERNEL VIRTUAL MACHINE (KVM) FOR ARM -M: Christoffer Dall -L: kvmarm@lists.cs.columbia.edu -W: http://systems.cs.columbia.edu/projects/kvm-arm -S: Maintained -F: arch/arm/include/uapi/asm/kvm* -F: arch/arm/include/asm/kvm* -F: arch/arm/kvm/ - KEXEC M: Eric Biederman W: http://kernel.org/pub/linux/utils/kernel/kexec/ @@ -4914,7 +4898,7 @@ S: Maintained MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2) M: Mirko Lindner -M: Stephen Hemminger +M: Stephen Hemminger L: netdev@vger.kernel.org S: Maintained F: drivers/net/ethernet/marvell/sk* @@ -5093,7 +5077,7 @@ S: Maintained F: drivers/media/radio/radio-mr800.c MSI LAPTOP SUPPORT -M: "Lee, Chun-Yi" +M: "Lee, Chun-Yi" L: platform-driver-x86@vger.kernel.org S: Maintained F: drivers/platform/x86/msi-laptop.c @@ -5189,7 +5173,7 @@ S: Supported F: drivers/infiniband/hw/nes/ NETEM NETWORK EMULATOR -M: Stephen Hemminger +M: Stephen Hemminger L: netem@lists.linux-foundation.org S: Maintained F: net/sched/sch_netem.c @@ -5523,7 +5507,8 @@ M: Benoît Cousson M: Paul Walmsley L: linux-omap@vger.kernel.org S: Maintained -F: arch/arm/mach-omap2/omap_hwmod.* +F: arch/arm/mach-omap2/omap_hwmod.c +F: arch/arm/plat-omap/include/plat/omap_hwmod.h OMAP HWMOD DATA FOR OMAP4-BASED DEVICES M: Benoît Cousson @@ -6594,7 +6579,7 @@ F: drivers/media/platform/s3c-camif/ F: include/media/s3c_camif.h SERIAL DRIVERS -M: Greg Kroah-Hartman +M: Alan Cox L: linux-serial@vger.kernel.org S: Maintained F: drivers/tty/serial @@ -7097,7 +7082,7 @@ F: include/uapi/sound/ F: sound/ SOUND - SOC LAYER / DYNAMIC AUDIO POWER MANAGEMENT (ASoC) -M: Liam Girdwood +M: Liam Girdwood M: Mark 
Brown T: git git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound.git L: alsa-devel@alsa-project.org (moderated for non-subscribers) @@ -7349,7 +7334,7 @@ S: Odd Fixes F: drivers/staging/speakup/ STAGING - TI DSP BRIDGE DRIVERS -M: Omar Ramirez Luna +M: Omar Ramirez Luna S: Odd Fixes F: drivers/staging/tidspbridge/ @@ -8541,7 +8526,7 @@ F: Documentation/x86/ F: arch/x86/ X86 PLATFORM DRIVERS -M: Matthew Garrett +M: Matthew Garrett L: platform-driver-x86@vger.kernel.org T: git git://git.kernel.org/pub/scm/linux/kernel/git/mjg59/platform-drivers-x86.git S: Maintained diff --git a/trunk/Makefile b/trunk/Makefile index 08ef9bdb80c7..a1667c4bcce5 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = -rc7 -NAME = Unicycling Gorilla +EXTRAVERSION = -rc3 +NAME = Terrified Chipmunk # *DOCUMENTATION* # To see a list of typical targets execute "make help" @@ -169,7 +169,7 @@ SUBARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ -e s/arm.*/arm/ -e s/sa110/arm/ \ -e s/s390x/s390/ -e s/parisc64/parisc/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ - -e s/sh[234].*/sh/ -e s/aarch64.*/arm64/ ) + -e s/sh[234].*/sh/ ) # Cross compiling and selecting different set of gcc/bin-utils # --------------------------------------------------------------------------- diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index 35b6756f055b..67874b82a4ed 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -4,7 +4,6 @@ config ARM select ARCH_BINFMT_ELF_RANDOMIZE_PIE select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE select ARCH_HAVE_CUSTOM_GPIO_H - select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST select ARCH_WANT_IPC_PARSE_VERSION select BUILDTIME_EXTABLE_SORT if MMU select CPU_PM if (SUSPEND || CPU_IDLE) @@ -2323,5 +2322,3 @@ source "security/Kconfig" source "crypto/Kconfig" source "lib/Kconfig" - -source "arch/arm/kvm/Kconfig" diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile index 4bcd2d6b0535..30c443c406f3 100644 --- a/trunk/arch/arm/Makefile +++ b/trunk/arch/arm/Makefile @@ -252,7 +252,6 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/ core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ) core-$(CONFIG_VFP) += arch/arm/vfp/ core-$(CONFIG_XEN) += arch/arm/xen/ -core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/ # If we have a machine-specific directory, then include it in the build. core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ diff --git a/trunk/arch/arm/boot/dts/Makefile b/trunk/arch/arm/boot/dts/Makefile index 5ebb44fe826a..e44da40d984f 100644 --- a/trunk/arch/arm/boot/dts/Makefile +++ b/trunk/arch/arm/boot/dts/Makefile @@ -155,7 +155,6 @@ dtb-$(CONFIG_ARCH_VT8500) += vt8500-bv07.dtb \ dtb-$(CONFIG_ARCH_ZYNQ) += zynq-zc702.dtb targets += dtbs -targets += $(dtb-y) endif # *.dtb used to be generated in the directory above. 
Clean out the diff --git a/trunk/arch/arm/boot/dts/armada-370-db.dts b/trunk/arch/arm/boot/dts/armada-370-db.dts index 9b82facb2561..00044026ef1f 100644 --- a/trunk/arch/arm/boot/dts/armada-370-db.dts +++ b/trunk/arch/arm/boot/dts/armada-370-db.dts @@ -26,7 +26,7 @@ memory { device_type = "memory"; - reg = <0x00000000 0x40000000>; /* 1 GB */ + reg = <0x00000000 0x20000000>; /* 512 MB */ }; soc { diff --git a/trunk/arch/arm/boot/dts/armada-xp-mv78230.dtsi b/trunk/arch/arm/boot/dts/armada-xp-mv78230.dtsi index e041f42ed711..271855a6e224 100644 --- a/trunk/arch/arm/boot/dts/armada-xp-mv78230.dtsi +++ b/trunk/arch/arm/boot/dts/armada-xp-mv78230.dtsi @@ -50,25 +50,27 @@ }; gpio0: gpio@d0018100 { - compatible = "marvell,orion-gpio"; - reg = <0xd0018100 0x40>; + compatible = "marvell,armadaxp-gpio"; + reg = <0xd0018100 0x40>, + <0xd0018800 0x30>; ngpios = <32>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupts-cells = <2>; - interrupts = <82>, <83>, <84>, <85>; + interrupts = <16>, <17>, <18>, <19>; }; gpio1: gpio@d0018140 { - compatible = "marvell,orion-gpio"; - reg = <0xd0018140 0x40>; + compatible = "marvell,armadaxp-gpio"; + reg = <0xd0018140 0x40>, + <0xd0018840 0x30>; ngpios = <17>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupts-cells = <2>; - interrupts = <87>, <88>, <89>; + interrupts = <20>, <21>, <22>; }; }; }; diff --git a/trunk/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/trunk/arch/arm/boot/dts/armada-xp-mv78260.dtsi index 9e23bd8c9536..1c1937dbce73 100644 --- a/trunk/arch/arm/boot/dts/armada-xp-mv78260.dtsi +++ b/trunk/arch/arm/boot/dts/armada-xp-mv78260.dtsi @@ -51,36 +51,39 @@ }; gpio0: gpio@d0018100 { - compatible = "marvell,orion-gpio"; - reg = <0xd0018100 0x40>; + compatible = "marvell,armadaxp-gpio"; + reg = <0xd0018100 0x40>, + <0xd0018800 0x30>; ngpios = <32>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupts-cells = <2>; - interrupts = <82>, <83>, <84>, <85>; + interrupts = <16>, <17>, <18>, <19>; }; gpio1: gpio@d0018140 { - compatible = "marvell,orion-gpio"; - reg = <0xd0018140 0x40>; + compatible = "marvell,armadaxp-gpio"; + reg = <0xd0018140 0x40>, + <0xd0018840 0x30>; ngpios = <32>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupts-cells = <2>; - interrupts = <87>, <88>, <89>, <90>; + interrupts = <20>, <21>, <22>, <23>; }; gpio2: gpio@d0018180 { - compatible = "marvell,orion-gpio"; - reg = <0xd0018180 0x40>; + compatible = "marvell,armadaxp-gpio"; + reg = <0xd0018180 0x40>, + <0xd0018870 0x30>; ngpios = <3>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupts-cells = <2>; - interrupts = <91>; + interrupts = <24>; }; ethernet@d0034000 { diff --git a/trunk/arch/arm/boot/dts/armada-xp-mv78460.dtsi b/trunk/arch/arm/boot/dts/armada-xp-mv78460.dtsi index 965966110e38..4905cf3a5ef8 100644 --- a/trunk/arch/arm/boot/dts/armada-xp-mv78460.dtsi +++ b/trunk/arch/arm/boot/dts/armada-xp-mv78460.dtsi @@ -66,36 +66,39 @@ }; gpio0: gpio@d0018100 { - compatible = "marvell,orion-gpio"; - reg = <0xd0018100 0x40>; + compatible = "marvell,armadaxp-gpio"; + reg = <0xd0018100 0x40>, + <0xd0018800 0x30>; ngpios = <32>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupts-cells = <2>; - interrupts = <82>, <83>, <84>, <85>; + interrupts = <16>, <17>, <18>, <19>; }; gpio1: gpio@d0018140 { - compatible = "marvell,orion-gpio"; - reg = <0xd0018140 0x40>; + compatible = "marvell,armadaxp-gpio"; + reg = <0xd0018140 0x40>, + <0xd0018840 0x30>; ngpios = <32>; gpio-controller; #gpio-cells = 
<2>; interrupt-controller; #interrupts-cells = <2>; - interrupts = <87>, <88>, <89>, <90>; + interrupts = <20>, <21>, <22>, <23>; }; gpio2: gpio@d0018180 { - compatible = "marvell,orion-gpio"; - reg = <0xd0018180 0x40>; + compatible = "marvell,armadaxp-gpio"; + reg = <0xd0018180 0x40>, + <0xd0018870 0x30>; ngpios = <3>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupts-cells = <2>; - interrupts = <91>; + interrupts = <24>; }; ethernet@d0034000 { diff --git a/trunk/arch/arm/boot/dts/at91rm9200.dtsi b/trunk/arch/arm/boot/dts/at91rm9200.dtsi index 222047f1ece9..e154f242c680 100644 --- a/trunk/arch/arm/boot/dts/at91rm9200.dtsi +++ b/trunk/arch/arm/boot/dts/at91rm9200.dtsi @@ -336,8 +336,8 @@ i2c@0 { compatible = "i2c-gpio"; - gpios = <&pioA 25 0 /* sda */ - &pioA 26 0 /* scl */ + gpios = <&pioA 23 0 /* sda */ + &pioA 24 0 /* scl */ >; i2c-gpio,sda-open-drain; i2c-gpio,scl-open-drain; diff --git a/trunk/arch/arm/boot/dts/at91sam9260.dtsi b/trunk/arch/arm/boot/dts/at91sam9260.dtsi index cb7bcc51608d..68bccf41a2c6 100644 --- a/trunk/arch/arm/boot/dts/at91sam9260.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9260.dtsi @@ -306,22 +306,6 @@ }; }; - ssc0 { - pinctrl_ssc0_tx: ssc0_tx-0 { - atmel,pins = - <1 16 0x1 0x0 /* PB16 periph A */ - 1 17 0x1 0x0 /* PB17 periph A */ - 1 18 0x1 0x0>; /* PB18 periph A */ - }; - - pinctrl_ssc0_rx: ssc0_rx-0 { - atmel,pins = - <1 19 0x1 0x0 /* PB19 periph A */ - 1 20 0x1 0x0 /* PB20 periph A */ - 1 21 0x1 0x0>; /* PB21 periph A */ - }; - }; - pioA: gpio@fffff400 { compatible = "atmel,at91rm9200-gpio"; reg = <0xfffff400 0x200>; @@ -466,8 +450,6 @@ compatible = "atmel,at91rm9200-ssc"; reg = <0xfffbc000 0x4000>; interrupts = <14 4 5>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/at91sam9263.dtsi b/trunk/arch/arm/boot/dts/at91sam9263.dtsi index 271d4de026e9..32ec62cf5385 100644 --- a/trunk/arch/arm/boot/dts/at91sam9263.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9263.dtsi @@ -271,38 +271,6 @@ }; }; - ssc0 { - pinctrl_ssc0_tx: ssc0_tx-0 { - atmel,pins = - <1 0 0x2 0x0 /* PB0 periph B */ - 1 1 0x2 0x0 /* PB1 periph B */ - 1 2 0x2 0x0>; /* PB2 periph B */ - }; - - pinctrl_ssc0_rx: ssc0_rx-0 { - atmel,pins = - <1 3 0x2 0x0 /* PB3 periph B */ - 1 4 0x2 0x0 /* PB4 periph B */ - 1 5 0x2 0x0>; /* PB5 periph B */ - }; - }; - - ssc1 { - pinctrl_ssc1_tx: ssc1_tx-0 { - atmel,pins = - <1 6 0x1 0x0 /* PB6 periph A */ - 1 7 0x1 0x0 /* PB7 periph A */ - 1 8 0x1 0x0>; /* PB8 periph A */ - }; - - pinctrl_ssc1_rx: ssc1_rx-0 { - atmel,pins = - <1 9 0x1 0x0 /* PB9 periph A */ - 1 10 0x1 0x0 /* PB10 periph A */ - 1 11 0x1 0x0>; /* PB11 periph A */ - }; - }; - pioA: gpio@fffff200 { compatible = "atmel,at91rm9200-gpio"; reg = <0xfffff200 0x200>; @@ -400,8 +368,6 @@ compatible = "atmel,at91rm9200-ssc"; reg = <0xfff98000 0x4000>; interrupts = <16 4 5>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>; status = "disabled"; }; @@ -409,8 +375,6 @@ compatible = "atmel,at91rm9200-ssc"; reg = <0xfff9c000 0x4000>; interrupts = <17 4 5>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/at91sam9g45.dtsi b/trunk/arch/arm/boot/dts/at91sam9g45.dtsi index 6b1d4cab24c2..231858ffd850 100644 --- a/trunk/arch/arm/boot/dts/at91sam9g45.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9g45.dtsi @@ -290,38 +290,6 @@ }; }; - ssc0 { - pinctrl_ssc0_tx: ssc0_tx-0 { - atmel,pins = - <3 0 0x1 0x0 /* 
PD0 periph A */ - 3 1 0x1 0x0 /* PD1 periph A */ - 3 2 0x1 0x0>; /* PD2 periph A */ - }; - - pinctrl_ssc0_rx: ssc0_rx-0 { - atmel,pins = - <3 3 0x1 0x0 /* PD3 periph A */ - 3 4 0x1 0x0 /* PD4 periph A */ - 3 5 0x1 0x0>; /* PD5 periph A */ - }; - }; - - ssc1 { - pinctrl_ssc1_tx: ssc1_tx-0 { - atmel,pins = - <3 10 0x1 0x0 /* PD10 periph A */ - 3 11 0x1 0x0 /* PD11 periph A */ - 3 12 0x1 0x0>; /* PD12 periph A */ - }; - - pinctrl_ssc1_rx: ssc1_rx-0 { - atmel,pins = - <3 13 0x1 0x0 /* PD13 periph A */ - 3 14 0x1 0x0 /* PD14 periph A */ - 3 15 0x1 0x0>; /* PD15 periph A */ - }; - }; - pioA: gpio@fffff200 { compatible = "atmel,at91rm9200-gpio"; reg = <0xfffff200 0x200>; @@ -457,8 +425,6 @@ compatible = "atmel,at91sam9g45-ssc"; reg = <0xfff9c000 0x4000>; interrupts = <16 4 5>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>; status = "disabled"; }; @@ -466,8 +432,6 @@ compatible = "atmel,at91sam9g45-ssc"; reg = <0xfffa0000 0x4000>; interrupts = <17 4 5>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ssc1_tx &pinctrl_ssc1_rx>; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/at91sam9n12.dtsi b/trunk/arch/arm/boot/dts/at91sam9n12.dtsi index 80e29c605d4e..e9efb34f4379 100644 --- a/trunk/arch/arm/boot/dts/at91sam9n12.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9n12.dtsi @@ -28,7 +28,6 @@ tcb1 = &tcb1; i2c0 = &i2c0; i2c1 = &i2c1; - ssc0 = &ssc0; }; cpus { cpu@0 { @@ -245,22 +244,6 @@ }; }; - ssc0 { - pinctrl_ssc0_tx: ssc0_tx-0 { - atmel,pins = - <0 24 0x2 0x0 /* PA24 periph B */ - 0 25 0x2 0x0 /* PA25 periph B */ - 0 26 0x2 0x0>; /* PA26 periph B */ - }; - - pinctrl_ssc0_rx: ssc0_rx-0 { - atmel,pins = - <0 27 0x2 0x0 /* PA27 periph B */ - 0 28 0x2 0x0 /* PA28 periph B */ - 0 29 0x2 0x0>; /* PA29 periph B */ - }; - }; - pioA: gpio@fffff400 { compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffff400 0x200>; @@ -311,15 +294,6 @@ status = "disabled"; }; - ssc0: ssc@f0010000 { - compatible = "atmel,at91sam9g45-ssc"; - reg = <0xf0010000 0x4000>; - interrupts = <28 4 5>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>; - status = "disabled"; - }; - usart0: serial@f801c000 { compatible = "atmel,at91sam9260-usart"; reg = <0xf801c000 0x4000>; diff --git a/trunk/arch/arm/boot/dts/at91sam9x5.dtsi b/trunk/arch/arm/boot/dts/at91sam9x5.dtsi index 8ecca6948d81..40ac3a4eb1ab 100644 --- a/trunk/arch/arm/boot/dts/at91sam9x5.dtsi +++ b/trunk/arch/arm/boot/dts/at91sam9x5.dtsi @@ -88,6 +88,13 @@ interrupts = <1 4 7>; }; + ssc0: ssc@f0010000 { + compatible = "atmel,at91sam9g45-ssc"; + reg = <0xf0010000 0x4000>; + interrupts = <28 4 5>; + status = "disabled"; + }; + tcb0: timer@f8008000 { compatible = "atmel,at91sam9x5-tcb"; reg = <0xf8008000 0x100>; @@ -143,11 +150,6 @@ atmel,pins = <0 3 0x1 0x0>; /* PA3 periph A */ }; - - pinctrl_usart0_sck: usart0_sck-0 { - atmel,pins = - <0 4 0x1 0x0>; /* PA4 periph A */ - }; }; usart1 { @@ -159,17 +161,12 @@ pinctrl_usart1_rts: usart1_rts-0 { atmel,pins = - <2 27 0x3 0x0>; /* PC27 periph C */ + <3 27 0x3 0x0>; /* PC27 periph C */ }; pinctrl_usart1_cts: usart1_cts-0 { atmel,pins = - <2 28 0x3 0x0>; /* PC28 periph C */ - }; - - pinctrl_usart1_sck: usart1_sck-0 { - atmel,pins = - <2 28 0x3 0x0>; /* PC29 periph C */ + <3 28 0x3 0x0>; /* PC28 periph C */ }; }; @@ -182,56 +179,46 @@ pinctrl_uart2_rts: uart2_rts-0 { atmel,pins = - <1 0 0x2 0x0>; /* PB0 periph B */ + <0 0 0x2 0x0>; /* PB0 periph B */ }; pinctrl_uart2_cts: uart2_cts-0 { atmel,pins = - <1 1 0x2 0x0>; /* PB1 periph B */ - }; - - 
pinctrl_usart2_sck: usart2_sck-0 { - atmel,pins = - <1 2 0x2 0x0>; /* PB2 periph B */ + <0 1 0x2 0x0>; /* PB1 periph B */ }; }; usart3 { pinctrl_uart3: usart3-0 { atmel,pins = - <2 23 0x2 0x1 /* PC22 periph B with pullup */ - 2 23 0x2 0x0>; /* PC23 periph B */ + <3 23 0x2 0x1 /* PC22 periph B with pullup */ + 3 23 0x2 0x0>; /* PC23 periph B */ }; pinctrl_usart3_rts: usart3_rts-0 { atmel,pins = - <2 24 0x2 0x0>; /* PC24 periph B */ + <3 24 0x2 0x0>; /* PC24 periph B */ }; pinctrl_usart3_cts: usart3_cts-0 { atmel,pins = - <2 25 0x2 0x0>; /* PC25 periph B */ - }; - - pinctrl_usart3_sck: usart3_sck-0 { - atmel,pins = - <2 26 0x2 0x0>; /* PC26 periph B */ + <3 25 0x2 0x0>; /* PC25 periph B */ }; }; uart0 { pinctrl_uart0: uart0-0 { atmel,pins = - <2 8 0x3 0x0 /* PC8 periph C */ - 2 9 0x3 0x1>; /* PC9 periph C with pullup */ + <3 8 0x3 0x0 /* PC8 periph C */ + 3 9 0x3 0x1>; /* PC9 periph C with pullup */ }; }; uart1 { pinctrl_uart1: uart1-0 { atmel,pins = - <2 16 0x3 0x0 /* PC16 periph C */ - 2 17 0x3 0x1>; /* PC17 periph C with pullup */ + <3 16 0x3 0x0 /* PC16 periph C */ + 3 17 0x3 0x1>; /* PC17 periph C with pullup */ }; }; @@ -260,14 +247,14 @@ pinctrl_macb0_rmii_mii: macb0_rmii_mii-0 { atmel,pins = - <1 8 0x1 0x0 /* PB8 periph A */ - 1 11 0x1 0x0 /* PB11 periph A */ - 1 12 0x1 0x0 /* PB12 periph A */ - 1 13 0x1 0x0 /* PB13 periph A */ - 1 14 0x1 0x0 /* PB14 periph A */ - 1 15 0x1 0x0 /* PB15 periph A */ - 1 16 0x1 0x0 /* PB16 periph A */ - 1 17 0x1 0x0>; /* PB17 periph A */ + <1 8 0x1 0x0 /* PA8 periph A */ + 1 11 0x1 0x0 /* PA11 periph A */ + 1 12 0x1 0x0 /* PA12 periph A */ + 1 13 0x1 0x0 /* PA13 periph A */ + 1 14 0x1 0x0 /* PA14 periph A */ + 1 15 0x1 0x0 /* PA15 periph A */ + 1 16 0x1 0x0 /* PA16 periph A */ + 1 17 0x1 0x0>; /* PA17 periph A */ }; }; @@ -303,22 +290,6 @@ }; }; - ssc0 { - pinctrl_ssc0_tx: ssc0_tx-0 { - atmel,pins = - <0 24 0x2 0x0 /* PA24 periph B */ - 0 25 0x2 0x0 /* PA25 periph B */ - 0 26 0x2 0x0>; /* PA26 periph B */ - }; - - pinctrl_ssc0_rx: ssc0_rx-0 { - atmel,pins = - <0 27 0x2 0x0 /* PA27 periph B */ - 0 28 0x2 0x0 /* PA28 periph B */ - 0 29 0x2 0x0>; /* PA29 periph B */ - }; - }; - pioA: gpio@fffff400 { compatible = "atmel,at91sam9x5-gpio", "atmel,at91rm9200-gpio"; reg = <0xfffff400 0x200>; @@ -362,15 +333,6 @@ }; }; - ssc0: ssc@f0010000 { - compatible = "atmel,at91sam9g45-ssc"; - reg = <0xf0010000 0x4000>; - interrupts = <28 4 5>; - pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_ssc0_tx &pinctrl_ssc0_rx>; - status = "disabled"; - }; - mmc0: mmc@f0008000 { compatible = "atmel,hsmci"; reg = <0xf0008000 0x600>; diff --git a/trunk/arch/arm/boot/dts/cros5250-common.dtsi b/trunk/arch/arm/boot/dts/cros5250-common.dtsi index 46c098017036..fddd17417433 100644 --- a/trunk/arch/arm/boot/dts/cros5250-common.dtsi +++ b/trunk/arch/arm/boot/dts/cros5250-common.dtsi @@ -96,8 +96,8 @@ fifo-depth = <0x80>; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; - samsung,dw-mshc-sdr-timing = <2 3>; - samsung,dw-mshc-ddr-timing = <1 2>; + samsung,dw-mshc-sdr-timing = <2 3 3>; + samsung,dw-mshc-ddr-timing = <1 2 3>; slot@0 { reg = <0>; @@ -120,8 +120,8 @@ fifo-depth = <0x80>; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; - samsung,dw-mshc-sdr-timing = <2 3>; - samsung,dw-mshc-ddr-timing = <1 2>; + samsung,dw-mshc-sdr-timing = <2 3 3>; + samsung,dw-mshc-ddr-timing = <1 2 3>; slot@0 { reg = <0>; @@ -141,8 +141,8 @@ fifo-depth = <0x80>; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; - samsung,dw-mshc-sdr-timing = <2 3>; - samsung,dw-mshc-ddr-timing = <1 
2>; + samsung,dw-mshc-sdr-timing = <2 3 3>; + samsung,dw-mshc-ddr-timing = <1 2 3>; slot@0 { reg = <0>; diff --git a/trunk/arch/arm/boot/dts/dove-cubox.dts b/trunk/arch/arm/boot/dts/dove-cubox.dts index cdee96fca6e2..fed7d3f9f431 100644 --- a/trunk/arch/arm/boot/dts/dove-cubox.dts +++ b/trunk/arch/arm/boot/dts/dove-cubox.dts @@ -26,15 +26,10 @@ }; &uart0 { status = "okay"; }; +&sdio0 { status = "okay"; }; &sata0 { status = "okay"; }; &i2c0 { status = "okay"; }; -&sdio0 { - status = "okay"; - /* sdio0 card detect is connected to wrong pin on CuBox */ - cd-gpios = <&gpio0 12 1>; -}; - &spi0 { status = "okay"; @@ -47,14 +42,9 @@ }; &pinctrl { - pinctrl-0 = <&pmx_gpio_12 &pmx_gpio_18>; + pinctrl-0 = <&pmx_gpio_18>; pinctrl-names = "default"; - pmx_gpio_12: pmx-gpio-12 { - marvell,pins = "mpp12"; - marvell,function = "gpio"; - }; - pmx_gpio_18: pmx-gpio-18 { marvell,pins = "mpp18"; marvell,function = "gpio"; diff --git a/trunk/arch/arm/boot/dts/exynos5250-smdk5250.dts b/trunk/arch/arm/boot/dts/exynos5250-smdk5250.dts index e05b18f3c33d..942d5761ca97 100644 --- a/trunk/arch/arm/boot/dts/exynos5250-smdk5250.dts +++ b/trunk/arch/arm/boot/dts/exynos5250-smdk5250.dts @@ -115,8 +115,8 @@ fifo-depth = <0x80>; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; - samsung,dw-mshc-sdr-timing = <2 3>; - samsung,dw-mshc-ddr-timing = <1 2>; + samsung,dw-mshc-sdr-timing = <2 3 3>; + samsung,dw-mshc-ddr-timing = <1 2 3>; slot@0 { reg = <0>; @@ -139,8 +139,8 @@ fifo-depth = <0x80>; card-detect-delay = <200>; samsung,dw-mshc-ciu-div = <3>; - samsung,dw-mshc-sdr-timing = <2 3>; - samsung,dw-mshc-ddr-timing = <1 2>; + samsung,dw-mshc-sdr-timing = <2 3 3>; + samsung,dw-mshc-ddr-timing = <1 2 3>; slot@0 { reg = <0>; diff --git a/trunk/arch/arm/boot/dts/kirkwood-ns2-common.dtsi b/trunk/arch/arm/boot/dts/kirkwood-ns2-common.dtsi index 77d21abfcdf7..9bc6785ad228 100644 --- a/trunk/arch/arm/boot/dts/kirkwood-ns2-common.dtsi +++ b/trunk/arch/arm/boot/dts/kirkwood-ns2-common.dtsi @@ -1,5 +1,4 @@ /include/ "kirkwood.dtsi" -/include/ "kirkwood-6281.dtsi" / { chosen { @@ -7,21 +6,6 @@ }; ocp@f1000000 { - pinctrl: pinctrl@10000 { - pinctrl-0 = < &pmx_spi &pmx_twsi0 &pmx_uart0 - &pmx_ns2_sata0 &pmx_ns2_sata1>; - pinctrl-names = "default"; - - pmx_ns2_sata0: pmx-ns2-sata0 { - marvell,pins = "mpp21"; - marvell,function = "sata0"; - }; - pmx_ns2_sata1: pmx-ns2-sata1 { - marvell,pins = "mpp20"; - marvell,function = "sata1"; - }; - }; - serial@12000 { clock-frequency = <166666667>; status = "okay"; diff --git a/trunk/arch/arm/boot/dts/kirkwood.dtsi b/trunk/arch/arm/boot/dts/kirkwood.dtsi index d6ab442b7011..110d6cbb795b 100644 --- a/trunk/arch/arm/boot/dts/kirkwood.dtsi +++ b/trunk/arch/arm/boot/dts/kirkwood.dtsi @@ -36,7 +36,6 @@ reg = <0x10100 0x40>; ngpios = <32>; interrupt-controller; - #interrupt-cells = <2>; interrupts = <35>, <36>, <37>, <38>; }; @@ -47,7 +46,6 @@ reg = <0x10140 0x40>; ngpios = <18>; interrupt-controller; - #interrupt-cells = <2>; interrupts = <39>, <40>, <41>; }; diff --git a/trunk/arch/arm/boot/dts/kizbox.dts b/trunk/arch/arm/boot/dts/kizbox.dts index b4dc3ed9a3ec..e8814fe0e277 100644 --- a/trunk/arch/arm/boot/dts/kizbox.dts +++ b/trunk/arch/arm/boot/dts/kizbox.dts @@ -48,8 +48,6 @@ macb0: ethernet@fffc4000 { phy-mode = "mii"; - pinctrl-0 = <&pinctrl_macb_rmii - &pinctrl_macb_rmii_mii_alt>; status = "okay"; }; diff --git a/trunk/arch/arm/boot/dts/sunxi.dtsi b/trunk/arch/arm/boot/dts/sunxi.dtsi index 8b36abea9f2e..8bbc2bfef221 100644 --- a/trunk/arch/arm/boot/dts/sunxi.dtsi +++ 
b/trunk/arch/arm/boot/dts/sunxi.dtsi @@ -60,21 +60,19 @@ }; uart0: uart@01c28000 { - compatible = "snps,dw-apb-uart"; + compatible = "ns8250"; reg = <0x01c28000 0x400>; interrupts = <1>; reg-shift = <2>; - reg-io-width = <4>; clock-frequency = <24000000>; status = "disabled"; }; uart1: uart@01c28400 { - compatible = "snps,dw-apb-uart"; + compatible = "ns8250"; reg = <0x01c28400 0x400>; interrupts = <2>; reg-shift = <2>; - reg-io-width = <4>; clock-frequency = <24000000>; status = "disabled"; }; diff --git a/trunk/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts b/trunk/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts index cf8071ad22d5..1fc405a9ecfb 100644 --- a/trunk/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts +++ b/trunk/arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts @@ -45,6 +45,7 @@ reg = <1>; }; +/* A7s disabled till big.LITTLE patches are available... cpu2: cpu@2 { device_type = "cpu"; compatible = "arm,cortex-a7"; @@ -62,6 +63,7 @@ compatible = "arm,cortex-a7"; reg = <0x102>; }; +*/ }; memory@80000000 { diff --git a/trunk/arch/arm/common/gic.c b/trunk/arch/arm/common/gic.c index 87dfa9026c5b..36ae03a3f5d1 100644 --- a/trunk/arch/arm/common/gic.c +++ b/trunk/arch/arm/common/gic.c @@ -351,25 +351,6 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) irq_set_chained_handler(irq, gic_handle_cascade_irq); } -static u8 gic_get_cpumask(struct gic_chip_data *gic) -{ - void __iomem *base = gic_data_dist_base(gic); - u32 mask, i; - - for (i = mask = 0; i < 32; i += 4) { - mask = readl_relaxed(base + GIC_DIST_TARGET + i); - mask |= mask >> 16; - mask |= mask >> 8; - if (mask) - break; - } - - if (!mask) - pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); - - return mask; -} - static void __init gic_dist_init(struct gic_chip_data *gic) { unsigned int i; @@ -388,9 +369,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic) /* * Set all global interrupts to this CPU only. */ - cpumask = gic_get_cpumask(gic); - cpumask |= cpumask << 8; - cpumask |= cpumask << 16; + cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); for (i = 32; i < gic_irqs; i += 4) writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); @@ -421,7 +400,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) * Get what the GIC says our CPU mask is. 
*/ BUG_ON(cpu >= NR_GIC_CPU_IF); - cpu_mask = gic_get_cpumask(gic); + cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); gic_cpu_map[cpu] = cpu_mask; /* diff --git a/trunk/arch/arm/configs/at91_dt_defconfig b/trunk/arch/arm/configs/at91_dt_defconfig index 1ea959019fcd..b175577d7abb 100644 --- a/trunk/arch/arm/configs/at91_dt_defconfig +++ b/trunk/arch/arm/configs/at91_dt_defconfig @@ -19,7 +19,6 @@ CONFIG_SOC_AT91SAM9260=y CONFIG_SOC_AT91SAM9263=y CONFIG_SOC_AT91SAM9G45=y CONFIG_SOC_AT91SAM9X5=y -CONFIG_SOC_AT91SAM9N12=y CONFIG_MACH_AT91SAM_DT=y CONFIG_AT91_PROGRAMMABLE_CLOCKS=y CONFIG_AT91_TIMER_HZ=128 @@ -32,7 +31,7 @@ CONFIG_ZBOOT_ROM_TEXT=0x0 CONFIG_ZBOOT_ROM_BSS=0x0 CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y -CONFIG_CMDLINE="console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw" +CONFIG_CMDLINE="mem=128M console=ttyS0,115200 initrd=0x21100000,25165824 root=/dev/ram0 rw" CONFIG_KEXEC=y CONFIG_AUTO_ZRELADDR=y # CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set diff --git a/trunk/arch/arm/include/asm/cputype.h b/trunk/arch/arm/include/asm/cputype.h index ad41ec2471e8..a59dcb5ab5fc 100644 --- a/trunk/arch/arm/include/asm/cputype.h +++ b/trunk/arch/arm/include/asm/cputype.h @@ -64,24 +64,6 @@ extern unsigned int processor_id; #define read_cpuid_ext(reg) 0 #endif -#define ARM_CPU_IMP_ARM 0x41 -#define ARM_CPU_IMP_INTEL 0x69 - -#define ARM_CPU_PART_ARM1136 0xB360 -#define ARM_CPU_PART_ARM1156 0xB560 -#define ARM_CPU_PART_ARM1176 0xB760 -#define ARM_CPU_PART_ARM11MPCORE 0xB020 -#define ARM_CPU_PART_CORTEX_A8 0xC080 -#define ARM_CPU_PART_CORTEX_A9 0xC090 -#define ARM_CPU_PART_CORTEX_A5 0xC050 -#define ARM_CPU_PART_CORTEX_A15 0xC0F0 -#define ARM_CPU_PART_CORTEX_A7 0xC070 - -#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 -#define ARM_CPU_XSCALE_ARCH_V1 0x2000 -#define ARM_CPU_XSCALE_ARCH_V2 0x4000 -#define ARM_CPU_XSCALE_ARCH_V3 0x6000 - /* * The CPU ID never changes at run time, so we might as well tell the * compiler that it's constant. Use this function to read the CPU ID @@ -92,21 +74,6 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void) return read_cpuid(CPUID_ID); } -static inline unsigned int __attribute_const__ read_cpuid_implementor(void) -{ - return (read_cpuid_id() & 0xFF000000) >> 24; -} - -static inline unsigned int __attribute_const__ read_cpuid_part_number(void) -{ - return read_cpuid_id() & 0xFFF0; -} - -static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void) -{ - return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK; -} - static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) { return read_cpuid(CPUID_CACHETYPE); diff --git a/trunk/arch/arm/include/asm/idmap.h b/trunk/arch/arm/include/asm/idmap.h index 1a66f907e5cc..bf863edb517d 100644 --- a/trunk/arch/arm/include/asm/idmap.h +++ b/trunk/arch/arm/include/asm/idmap.h @@ -8,7 +8,6 @@ #define __idmap __section(.idmap.text) noinline notrace extern pgd_t *idmap_pgd; -extern pgd_t *hyp_pgd; void setup_mm_for_reboot(void); diff --git a/trunk/arch/arm/include/asm/kvm_arm.h b/trunk/arch/arm/include/asm/kvm_arm.h deleted file mode 100644 index 7c3d813e15df..000000000000 --- a/trunk/arch/arm/include/asm/kvm_arm.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_ARM_H__ -#define __ARM_KVM_ARM_H__ - -#include - -/* Hyp Configuration Register (HCR) bits */ -#define HCR_TGE (1 << 27) -#define HCR_TVM (1 << 26) -#define HCR_TTLB (1 << 25) -#define HCR_TPU (1 << 24) -#define HCR_TPC (1 << 23) -#define HCR_TSW (1 << 22) -#define HCR_TAC (1 << 21) -#define HCR_TIDCP (1 << 20) -#define HCR_TSC (1 << 19) -#define HCR_TID3 (1 << 18) -#define HCR_TID2 (1 << 17) -#define HCR_TID1 (1 << 16) -#define HCR_TID0 (1 << 15) -#define HCR_TWE (1 << 14) -#define HCR_TWI (1 << 13) -#define HCR_DC (1 << 12) -#define HCR_BSU (3 << 10) -#define HCR_BSU_IS (1 << 10) -#define HCR_FB (1 << 9) -#define HCR_VA (1 << 8) -#define HCR_VI (1 << 7) -#define HCR_VF (1 << 6) -#define HCR_AMO (1 << 5) -#define HCR_IMO (1 << 4) -#define HCR_FMO (1 << 3) -#define HCR_PTW (1 << 2) -#define HCR_SWIO (1 << 1) -#define HCR_VM 1 - -/* - * The bits we set in HCR: - * TAC: Trap ACTLR - * TSC: Trap SMC - * TSW: Trap cache operations by set/way - * TWI: Trap WFI - * TIDCP: Trap L2CTLR/L2ECTLR - * BSU_IS: Upgrade barriers to the inner shareable domain - * FB: Force broadcast of all maintainance operations - * AMO: Override CPSR.A and enable signaling with VA - * IMO: Override CPSR.I and enable signaling with VI - * FMO: Override CPSR.F and enable signaling with VF - * SWIO: Turn set/way invalidates into set/way clean+invalidate - */ -#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ - HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ - HCR_SWIO | HCR_TIDCP) -#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) - -/* System Control Register (SCTLR) bits */ -#define SCTLR_TE (1 << 30) -#define SCTLR_EE (1 << 25) -#define SCTLR_V (1 << 13) - -/* Hyp System Control Register (HSCTLR) bits */ -#define HSCTLR_TE (1 << 30) -#define HSCTLR_EE (1 << 25) -#define HSCTLR_FI (1 << 21) -#define HSCTLR_WXN (1 << 19) -#define HSCTLR_I (1 << 12) -#define HSCTLR_C (1 << 2) -#define HSCTLR_A (1 << 1) -#define HSCTLR_M 1 -#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \ - HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE) - -/* TTBCR and HTCR Registers bits */ -#define TTBCR_EAE (1 << 31) -#define TTBCR_IMP (1 << 30) -#define TTBCR_SH1 (3 << 28) -#define TTBCR_ORGN1 (3 << 26) -#define TTBCR_IRGN1 (3 << 24) -#define TTBCR_EPD1 (1 << 23) -#define TTBCR_A1 (1 << 22) -#define TTBCR_T1SZ (3 << 16) -#define TTBCR_SH0 (3 << 12) -#define TTBCR_ORGN0 (3 << 10) -#define TTBCR_IRGN0 (3 << 8) -#define TTBCR_EPD0 (1 << 7) -#define TTBCR_T0SZ 3 -#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0) - -/* Hyp System Trap Register */ -#define HSTR_T(x) (1 << x) -#define HSTR_TTEE (1 << 16) -#define HSTR_TJDBX (1 << 17) - -/* Hyp Coprocessor Trap Register */ -#define HCPTR_TCP(x) (1 << x) -#define HCPTR_TCP_MASK (0x3fff) -#define HCPTR_TASE (1 << 15) -#define HCPTR_TTA (1 << 20) -#define HCPTR_TCPAC (1 << 31) - -/* Hyp Debug Configuration Register bits */ -#define HDCR_TDRA (1 << 11) -#define HDCR_TDOSA (1 << 10) -#define HDCR_TDA (1 << 9) -#define HDCR_TDE (1 << 8) -#define HDCR_HPME (1 
<< 7) -#define HDCR_TPM (1 << 6) -#define HDCR_TPMCR (1 << 5) -#define HDCR_HPMN_MASK (0x1F) - -/* - * The architecture supports 40-bit IPA as input to the 2nd stage translations - * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address - * space. - */ -#define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) -#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) -#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) -#define S2_PGD_SIZE (1 << S2_PGD_ORDER) - -/* Virtualization Translation Control Register (VTCR) bits */ -#define VTCR_SH0 (3 << 12) -#define VTCR_ORGN0 (3 << 10) -#define VTCR_IRGN0 (3 << 8) -#define VTCR_SL0 (3 << 6) -#define VTCR_S (1 << 4) -#define VTCR_T0SZ (0xf) -#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \ - VTCR_S | VTCR_T0SZ) -#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0) -#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */ -#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */ -#define KVM_VTCR_SL0 VTCR_SL_L1 -/* stage-2 input address range defined as 2^(32-T0SZ) */ -#define KVM_T0SZ (32 - KVM_PHYS_SHIFT) -#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ) -#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S) - -/* Virtualization Translation Table Base Register (VTTBR) bits */ -#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */ -#define VTTBR_X (14 - KVM_T0SZ) -#else -#define VTTBR_X (5 - KVM_T0SZ) -#endif -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) -#define VTTBR_VMID_SHIFT (48LLU) -#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) - -/* Hyp Syndrome Register (HSR) bits */ -#define HSR_EC_SHIFT (26) -#define HSR_EC (0x3fU << HSR_EC_SHIFT) -#define HSR_IL (1U << 25) -#define HSR_ISS (HSR_IL - 1) -#define HSR_ISV_SHIFT (24) -#define HSR_ISV (1U << HSR_ISV_SHIFT) -#define HSR_SRT_SHIFT (16) -#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT) -#define HSR_FSC (0x3f) -#define HSR_FSC_TYPE (0x3c) -#define HSR_SSE (1 << 21) -#define HSR_WNR (1 << 6) -#define HSR_CV_SHIFT (24) -#define HSR_CV (1U << HSR_CV_SHIFT) -#define HSR_COND_SHIFT (20) -#define HSR_COND (0xfU << HSR_COND_SHIFT) - -#define FSC_FAULT (0x04) -#define FSC_PERM (0x0c) - -/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ -#define HPFAR_MASK (~0xf) - -#define HSR_EC_UNKNOWN (0x00) -#define HSR_EC_WFI (0x01) -#define HSR_EC_CP15_32 (0x03) -#define HSR_EC_CP15_64 (0x04) -#define HSR_EC_CP14_MR (0x05) -#define HSR_EC_CP14_LS (0x06) -#define HSR_EC_CP_0_13 (0x07) -#define HSR_EC_CP10_ID (0x08) -#define HSR_EC_JAZELLE (0x09) -#define HSR_EC_BXJ (0x0A) -#define HSR_EC_CP14_64 (0x0C) -#define HSR_EC_SVC_HYP (0x11) -#define HSR_EC_HVC (0x12) -#define HSR_EC_SMC (0x13) -#define HSR_EC_IABT (0x20) -#define HSR_EC_IABT_HYP (0x21) -#define HSR_EC_DABT (0x24) -#define HSR_EC_DABT_HYP (0x25) - -#define HSR_HVC_IMM_MASK ((1UL << 16) - 1) - -#endif /* __ARM_KVM_ARM_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_asm.h b/trunk/arch/arm/include/asm/kvm_asm.h deleted file mode 100644 index 5e06e8177784..000000000000 --- a/trunk/arch/arm/include/asm/kvm_asm.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_ASM_H__ -#define __ARM_KVM_ASM_H__ - -/* 0 is reserved as an invalid value. */ -#define c0_MPIDR 1 /* MultiProcessor ID Register */ -#define c0_CSSELR 2 /* Cache Size Selection Register */ -#define c1_SCTLR 3 /* System Control Register */ -#define c1_ACTLR 4 /* Auxilliary Control Register */ -#define c1_CPACR 5 /* Coprocessor Access Control */ -#define c2_TTBR0 6 /* Translation Table Base Register 0 */ -#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */ -#define c2_TTBR1 8 /* Translation Table Base Register 1 */ -#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */ -#define c2_TTBCR 10 /* Translation Table Base Control R. */ -#define c3_DACR 11 /* Domain Access Control Register */ -#define c5_DFSR 12 /* Data Fault Status Register */ -#define c5_IFSR 13 /* Instruction Fault Status Register */ -#define c5_ADFSR 14 /* Auxilary Data Fault Status R */ -#define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ -#define c6_DFAR 16 /* Data Fault Address Register */ -#define c6_IFAR 17 /* Instruction Fault Address Register */ -#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */ -#define c10_PRRR 19 /* Primary Region Remap Register */ -#define c10_NMRR 20 /* Normal Memory Remap Register */ -#define c12_VBAR 21 /* Vector Base Address Register */ -#define c13_CID 22 /* Context ID Register */ -#define c13_TID_URW 23 /* Thread ID, User R/W */ -#define c13_TID_URO 24 /* Thread ID, User R/O */ -#define c13_TID_PRIV 25 /* Thread ID, Privileged */ -#define NR_CP15_REGS 26 /* Number of regs (incl. invalid) */ - -#define ARM_EXCEPTION_RESET 0 -#define ARM_EXCEPTION_UNDEFINED 1 -#define ARM_EXCEPTION_SOFTWARE 2 -#define ARM_EXCEPTION_PREF_ABORT 3 -#define ARM_EXCEPTION_DATA_ABORT 4 -#define ARM_EXCEPTION_IRQ 5 -#define ARM_EXCEPTION_FIQ 6 -#define ARM_EXCEPTION_HVC 7 - -#ifndef __ASSEMBLY__ -struct kvm; -struct kvm_vcpu; - -extern char __kvm_hyp_init[]; -extern char __kvm_hyp_init_end[]; - -extern char __kvm_hyp_exit[]; -extern char __kvm_hyp_exit_end[]; - -extern char __kvm_hyp_vector[]; - -extern char __kvm_hyp_code_start[]; -extern char __kvm_hyp_code_end[]; - -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); - -extern void __kvm_flush_vm_context(void); -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); - -extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); -#endif - -#endif /* __ARM_KVM_ASM_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_coproc.h b/trunk/arch/arm/include/asm/kvm_coproc.h deleted file mode 100644 index 4917c2f7e459..000000000000 --- a/trunk/arch/arm/include/asm/kvm_coproc.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (C) 2012 Rusty Russell IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_COPROC_H__ -#define __ARM_KVM_COPROC_H__ -#include - -void kvm_reset_coprocs(struct kvm_vcpu *vcpu); - -struct kvm_coproc_target_table { - unsigned target; - const struct coproc_reg *table; - size_t num; -}; -void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table); - -int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); - -unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu); -int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices); -void kvm_coproc_table_init(void); - -struct kvm_one_reg; -int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); -int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); -int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); -unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu); -#endif /* __ARM_KVM_COPROC_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_emulate.h b/trunk/arch/arm/include/asm/kvm_emulate.h deleted file mode 100644 index fd611996bfb5..000000000000 --- a/trunk/arch/arm/include/asm/kvm_emulate.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#ifndef __ARM_KVM_EMULATE_H__ -#define __ARM_KVM_EMULATE_H__ - -#include -#include -#include - -u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); -u32 *vcpu_spsr(struct kvm_vcpu *vcpu); - -int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); -void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); -void kvm_inject_undefined(struct kvm_vcpu *vcpu); -void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); -void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); - -static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) -{ - return 1; -} - -static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) -{ - return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; -} - -static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) -{ - return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; -} - -static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) -{ - *vcpu_cpsr(vcpu) |= PSR_T_BIT; -} - -static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) -{ - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; - return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); -} - -static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) -{ - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; - return cpsr_mode > USR_MODE;; -} - -static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) -{ - return reg == 15; -} - -#endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_host.h b/trunk/arch/arm/include/asm/kvm_host.h deleted file mode 100644 index 98b4d1a72923..000000000000 --- a/trunk/arch/arm/include/asm/kvm_host.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_HOST_H__ -#define __ARM_KVM_HOST_H__ - -#include -#include -#include -#include - -#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS -#define KVM_MEMORY_SLOTS 32 -#define KVM_PRIVATE_MEM_SLOTS 4 -#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 -#define KVM_HAVE_ONE_REG - -#define KVM_VCPU_MAX_FEATURES 1 - -/* We don't currently support large pages. */ -#define KVM_HPAGE_GFN_SHIFT(x) 0 -#define KVM_NR_PAGE_SIZES 1 -#define KVM_PAGES_PER_HPAGE(x) (1UL<<31) - -struct kvm_vcpu; -u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); -int kvm_target_cpu(void); -int kvm_reset_vcpu(struct kvm_vcpu *vcpu); -void kvm_reset_coprocs(struct kvm_vcpu *vcpu); - -struct kvm_arch { - /* VTTBR value associated with below pgd and vmid */ - u64 vttbr; - - /* - * Anything that is not used directly from assembly code goes - * here. - */ - - /* The VMID generation used for the virt. 
memory system */ - u64 vmid_gen; - u32 vmid; - - /* Stage-2 page table */ - pgd_t *pgd; -}; - -#define KVM_NR_MEM_OBJS 40 - -/* - * We don't want allocation failures within the mmu code, so we preallocate - * enough memory for a single page fault in a cache. - */ -struct kvm_mmu_memory_cache { - int nobjs; - void *objects[KVM_NR_MEM_OBJS]; -}; - -struct kvm_vcpu_arch { - struct kvm_regs regs; - - int target; /* Processor target */ - DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); - - /* System control coprocessor (cp15) */ - u32 cp15[NR_CP15_REGS]; - - /* The CPU type we expose to the VM */ - u32 midr; - - /* Exception Information */ - u32 hsr; /* Hyp Syndrome Register */ - u32 hxfar; /* Hyp Data/Inst Fault Address Register */ - u32 hpfar; /* Hyp IPA Fault Address Register */ - - /* Floating point registers (VFP and Advanced SIMD/NEON) */ - struct vfp_hard_struct vfp_guest; - struct vfp_hard_struct *vfp_host; - - /* - * Anything that is not used directly from assembly code goes - * here. - */ - /* dcache set/way operation pending */ - int last_pcpu; - cpumask_t require_dcache_flush; - - /* Don't run the guest on this vcpu */ - bool pause; - - /* IO related fields */ - struct kvm_decode mmio_decode; - - /* Interrupt related fields */ - u32 irq_lines; /* IRQ and FIQ levels */ - - /* Hyp exception information */ - u32 hyp_pc; /* PC when exception was taken from Hyp mode */ - - /* Cache some mmu pages needed inside spinlock regions */ - struct kvm_mmu_memory_cache mmu_page_cache; - - /* Detect first run of a vcpu */ - bool has_run_once; -}; - -struct kvm_vm_stat { - u32 remote_tlb_flush; -}; - -struct kvm_vcpu_stat { - u32 halt_wakeup; -}; - -struct kvm_vcpu_init; -int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, - const struct kvm_vcpu_init *init); -unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); -int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); -struct kvm_one_reg; -int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); -int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); -u64 kvm_call_hyp(void *hypfn, ...); -void force_vm_exit(const cpumask_t *mask); - -#define KVM_ARCH_WANT_MMU_NOTIFIER -struct kvm; -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); - -unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); -int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); - -/* We do not have shadow page tables, hence the empty hooks */ -static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) -{ - return 0; -} - -static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) -{ - return 0; -} -#endif /* __ARM_KVM_HOST_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_mmio.h b/trunk/arch/arm/include/asm/kvm_mmio.h deleted file mode 100644 index adcc0d7d3175..000000000000 --- a/trunk/arch/arm/include/asm/kvm_mmio.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_MMIO_H__ -#define __ARM_KVM_MMIO_H__ - -#include -#include -#include - -struct kvm_decode { - unsigned long rt; - bool sign_extend; -}; - -/* - * The in-kernel MMIO emulation code wants to use a copy of run->mmio, - * which is an anonymous type. Use our own type instead. - */ -struct kvm_exit_mmio { - phys_addr_t phys_addr; - u8 data[8]; - u32 len; - bool is_write; -}; - -static inline void kvm_prepare_mmio(struct kvm_run *run, - struct kvm_exit_mmio *mmio) -{ - run->mmio.phys_addr = mmio->phys_addr; - run->mmio.len = mmio->len; - run->mmio.is_write = mmio->is_write; - memcpy(run->mmio.data, mmio->data, mmio->len); - run->exit_reason = KVM_EXIT_MMIO; -} - -int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); -int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, - phys_addr_t fault_ipa); - -#endif /* __ARM_KVM_MMIO_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_mmu.h b/trunk/arch/arm/include/asm/kvm_mmu.h deleted file mode 100644 index 421a20b34874..000000000000 --- a/trunk/arch/arm/include/asm/kvm_mmu.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_MMU_H__ -#define __ARM_KVM_MMU_H__ - -int create_hyp_mappings(void *from, void *to); -int create_hyp_io_mappings(void *from, void *to, phys_addr_t); -void free_hyp_pmds(void); - -int kvm_alloc_stage2_pgd(struct kvm *kvm); -void kvm_free_stage2_pgd(struct kvm *kvm); -int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, - phys_addr_t pa, unsigned long size); - -int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); - -void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); - -phys_addr_t kvm_mmu_get_httbr(void); -int kvm_mmu_init(void); -void kvm_clear_hyp_idmap(void); - -static inline bool kvm_is_write_fault(unsigned long hsr) -{ - unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; - if (hsr_ec == HSR_EC_IABT) - return false; - else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR)) - return false; - else - return true; -} - -#endif /* __ARM_KVM_MMU_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_psci.h b/trunk/arch/arm/include/asm/kvm_psci.h deleted file mode 100644 index 9a83d98bf170..000000000000 --- a/trunk/arch/arm/include/asm/kvm_psci.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (C) 2012 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM_KVM_PSCI_H__ -#define __ARM_KVM_PSCI_H__ - -bool kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM_KVM_PSCI_H__ */ diff --git a/trunk/arch/arm/include/asm/memory.h b/trunk/arch/arm/include/asm/memory.h index 1c4df27f9332..73cf03aa981e 100644 --- a/trunk/arch/arm/include/asm/memory.h +++ b/trunk/arch/arm/include/asm/memory.h @@ -37,7 +37,7 @@ */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) -#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) +#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) /* * The maximum size of a 26-bit user space task. diff --git a/trunk/arch/arm/include/asm/pgtable-3level-hwdef.h b/trunk/arch/arm/include/asm/pgtable-3level-hwdef.h index 18f5cef82ad5..d7952824c5c4 100644 --- a/trunk/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/trunk/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -32,9 +32,6 @@ #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) #define PMD_BIT4 (_AT(pmdval_t, 0)) #define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) -#define PMD_APTABLE_SHIFT (61) -#define PMD_APTABLE (_AT(pgdval_t, 3) << PGD_APTABLE_SHIFT) -#define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59) /* * - section @@ -44,11 +41,9 @@ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) -#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0)) #define PMD_SECT_AP_READ (_AT(pmdval_t, 0)) -#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6) #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0)) /* diff --git a/trunk/arch/arm/include/asm/pgtable-3level.h b/trunk/arch/arm/include/asm/pgtable-3level.h index 6ef8afd1b64c..a3f37929940a 100644 --- a/trunk/arch/arm/include/asm/pgtable-3level.h +++ b/trunk/arch/arm/include/asm/pgtable-3level.h @@ -104,29 +104,11 @@ */ #define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */ -/* - * 2nd stage PTE definitions for LPAE. - */ -#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ - -/* - * Hyp-mode PL2 PTE definitions for LPAE. 
- */ -#define L_PTE_HYP L_PTE_USER - #ifndef __ASSEMBLY__ #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (!(pud_val(pud) & 2)) #define pud_present(pud) (pud_val(pud)) -#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ - PMD_TYPE_TABLE) -#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ - PMD_TYPE_SECT) #define pud_clear(pudp) \ do { \ diff --git a/trunk/arch/arm/include/asm/pgtable.h b/trunk/arch/arm/include/asm/pgtable.h index f30ac3b55ba9..9c82f988c0e3 100644 --- a/trunk/arch/arm/include/asm/pgtable.h +++ b/trunk/arch/arm/include/asm/pgtable.h @@ -70,9 +70,6 @@ extern void __pgd_error(const char *file, int line, pgd_t); extern pgprot_t pgprot_user; extern pgprot_t pgprot_kernel; -extern pgprot_t pgprot_hyp_device; -extern pgprot_t pgprot_s2; -extern pgprot_t pgprot_s2_device; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) @@ -85,10 +82,6 @@ extern pgprot_t pgprot_s2_device; #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) #define PAGE_KERNEL_EXEC pgprot_kernel -#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP) -#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) -#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) -#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY) #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) diff --git a/trunk/arch/arm/include/uapi/asm/kvm.h b/trunk/arch/arm/include/uapi/asm/kvm.h deleted file mode 100644 index 3303ff5adbf3..000000000000 --- a/trunk/arch/arm/include/uapi/asm/kvm.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#ifndef __ARM_KVM_H__ -#define __ARM_KVM_H__ - -#include -#include - -#define __KVM_HAVE_GUEST_DEBUG -#define __KVM_HAVE_IRQ_LINE - -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) - -/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ -#define KVM_ARM_SVC_sp svc_regs[0] -#define KVM_ARM_SVC_lr svc_regs[1] -#define KVM_ARM_SVC_spsr svc_regs[2] -#define KVM_ARM_ABT_sp abt_regs[0] -#define KVM_ARM_ABT_lr abt_regs[1] -#define KVM_ARM_ABT_spsr abt_regs[2] -#define KVM_ARM_UND_sp und_regs[0] -#define KVM_ARM_UND_lr und_regs[1] -#define KVM_ARM_UND_spsr und_regs[2] -#define KVM_ARM_IRQ_sp irq_regs[0] -#define KVM_ARM_IRQ_lr irq_regs[1] -#define KVM_ARM_IRQ_spsr irq_regs[2] - -/* Valid only for fiq_regs in struct kvm_regs */ -#define KVM_ARM_FIQ_r8 fiq_regs[0] -#define KVM_ARM_FIQ_r9 fiq_regs[1] -#define KVM_ARM_FIQ_r10 fiq_regs[2] -#define KVM_ARM_FIQ_fp fiq_regs[3] -#define KVM_ARM_FIQ_ip fiq_regs[4] -#define KVM_ARM_FIQ_sp fiq_regs[5] -#define KVM_ARM_FIQ_lr fiq_regs[6] -#define KVM_ARM_FIQ_spsr fiq_regs[7] - -struct kvm_regs { - struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ - __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ - __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ - __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */ - __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ - __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ -}; - -/* Supported Processor Types */ -#define KVM_ARM_TARGET_CORTEX_A15 0 -#define KVM_ARM_NUM_TARGETS 1 - -#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ - -struct kvm_vcpu_init { - __u32 target; - __u32 features[7]; -}; - -struct kvm_sregs { -}; - -struct kvm_fpu { -}; - -struct kvm_guest_debug_arch { -}; - -struct kvm_debug_exit_arch { -}; - -struct kvm_sync_regs { -}; - -struct kvm_arch_memory_slot { -}; - -/* If you need to interpret the index values, here is the key: */ -#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 -#define KVM_REG_ARM_COPROC_SHIFT 16 -#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 -#define KVM_REG_ARM_32_OPC2_SHIFT 0 -#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078 -#define KVM_REG_ARM_OPC1_SHIFT 3 -#define KVM_REG_ARM_CRM_MASK 0x0000000000000780 -#define KVM_REG_ARM_CRM_SHIFT 7 -#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 -#define KVM_REG_ARM_32_CRN_SHIFT 11 - -/* Normal registers are mapped as coprocessor 16. */ -#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) - -/* Some registers need more space to represent values. */ -#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 -#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 -#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) -#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF -#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 - -/* VFP registers: we could overload CP10 like ARM does, but that's ugly. 
*/ -#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF -#define KVM_REG_ARM_VFP_BASE_REG 0x0 -#define KVM_REG_ARM_VFP_FPSID 0x1000 -#define KVM_REG_ARM_VFP_FPSCR 0x1001 -#define KVM_REG_ARM_VFP_MVFR1 0x1006 -#define KVM_REG_ARM_VFP_MVFR0 0x1007 -#define KVM_REG_ARM_VFP_FPEXC 0x1008 -#define KVM_REG_ARM_VFP_FPINST 0x1009 -#define KVM_REG_ARM_VFP_FPINST2 0x100A - - -/* KVM_IRQ_LINE irq field index values */ -#define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff -#define KVM_ARM_IRQ_VCPU_SHIFT 16 -#define KVM_ARM_IRQ_VCPU_MASK 0xff -#define KVM_ARM_IRQ_NUM_SHIFT 0 -#define KVM_ARM_IRQ_NUM_MASK 0xffff - -/* irq_type field */ -#define KVM_ARM_IRQ_TYPE_CPU 0 -#define KVM_ARM_IRQ_TYPE_SPI 1 -#define KVM_ARM_IRQ_TYPE_PPI 2 - -/* out-of-kernel GIC cpu interrupt injection irq_number field */ -#define KVM_ARM_IRQ_CPU_IRQ 0 -#define KVM_ARM_IRQ_CPU_FIQ 1 - -/* Highest supported SPI, from VGIC_NR_IRQS */ -#define KVM_ARM_IRQ_GIC_MAX 127 - -/* PSCI interface */ -#define KVM_PSCI_FN_BASE 0x95c1ba5e -#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) - -#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) -#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) -#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) -#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) - -#define KVM_PSCI_RET_SUCCESS 0 -#define KVM_PSCI_RET_NI ((unsigned long)-1) -#define KVM_PSCI_RET_INVAL ((unsigned long)-2) -#define KVM_PSCI_RET_DENIED ((unsigned long)-3) - -#endif /* __ARM_KVM_H__ */ diff --git a/trunk/arch/arm/kernel/arch_timer.c b/trunk/arch/arm/kernel/arch_timer.c index c8ef20747ee7..6dd73c67d864 100644 --- a/trunk/arch/arm/kernel/arch_timer.c +++ b/trunk/arch/arm/kernel/arch_timer.c @@ -488,6 +488,8 @@ int __init arch_timer_of_register(void) for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++) arch_timer_ppi[i] = irq_of_parse_and_map(np, i); + of_node_put(np); + /* * If no interrupt provided for virtual timer, we'll have to * stick to the physical timer. It'd better be accessible... 
diff --git a/trunk/arch/arm/kernel/asm-offsets.c b/trunk/arch/arm/kernel/asm-offsets.c index c8b3272dfed1..c985b481192c 100644 --- a/trunk/arch/arm/kernel/asm-offsets.c +++ b/trunk/arch/arm/kernel/asm-offsets.c @@ -13,9 +13,6 @@ #include #include #include -#ifdef CONFIG_KVM_ARM_HOST -#include -#endif #include #include #include @@ -149,27 +146,5 @@ int main(void) DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); -#ifdef CONFIG_KVM_ARM_HOST - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); - DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); - DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); - DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host)); - DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); - DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); - DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); - DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); - DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); - DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs)); - DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); - DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); - DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); - DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); - DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); - DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); - DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); - DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); -#endif return 0; } diff --git a/trunk/arch/arm/kernel/debug.S b/trunk/arch/arm/kernel/debug.S index 14f7c3b14632..6809200c31fb 100644 --- a/trunk/arch/arm/kernel/debug.S +++ b/trunk/arch/arm/kernel/debug.S @@ -100,14 +100,12 @@ ENTRY(printch) b 1b ENDPROC(printch) -#ifdef CONFIG_MMU ENTRY(debug_ll_addr) addruart r2, r3, ip str r2, [r0] str r3, [r1] mov pc, lr ENDPROC(debug_ll_addr) -#endif #else diff --git a/trunk/arch/arm/kernel/head.S b/trunk/arch/arm/kernel/head.S index 486a15ae9011..4eee351f4668 100644 --- a/trunk/arch/arm/kernel/head.S +++ b/trunk/arch/arm/kernel/head.S @@ -246,7 +246,6 @@ __create_page_tables: /* * Then map boot params address in r2 if specified. - * We map 2 sections in case the ATAGs/DTB crosses a section boundary. */ mov r0, r2, lsr #SECTION_SHIFT movs r0, r0, lsl #SECTION_SHIFT @@ -254,8 +253,6 @@ __create_page_tables: addne r3, r3, #PAGE_OFFSET addne r3, r4, r3, lsr #(SECTION_SHIFT - PMD_ORDER) orrne r6, r7, r0 - strne r6, [r3], #1 << PMD_ORDER - addne r6, r6, #1 << SECTION_SHIFT strne r6, [r3] #ifdef CONFIG_DEBUG_LL @@ -334,7 +331,7 @@ ENTRY(secondary_startup) * as it has already been validated by the primary processor. */ #ifdef CONFIG_ARM_VIRT_EXT - bl __hyp_stub_install_secondary + bl __hyp_stub_install #endif safe_svcmode_maskall r9 diff --git a/trunk/arch/arm/kernel/hyp-stub.S b/trunk/arch/arm/kernel/hyp-stub.S index 1315c4ccfa56..65b2417aebce 100644 --- a/trunk/arch/arm/kernel/hyp-stub.S +++ b/trunk/arch/arm/kernel/hyp-stub.S @@ -99,7 +99,7 @@ ENTRY(__hyp_stub_install_secondary) * immediately. 
*/ compare_cpu_mode_with_primary r4, r5, r6, r7 - movne pc, lr + bxne lr /* * Once we have given up on one CPU, we do not try to install the @@ -111,7 +111,7 @@ ENTRY(__hyp_stub_install_secondary) */ cmp r4, #HYP_MODE - movne pc, lr @ give up if the CPU is not in HYP mode + bxne lr @ give up if the CPU is not in HYP mode /* * Configure HSCTLR to set correct exception endianness/instruction set @@ -120,8 +120,7 @@ ENTRY(__hyp_stub_install_secondary) * Eventually, CPU-specific code might be needed -- assume not for now * * This code relies on the "eret" instruction to synchronize the - * various coprocessor accesses. This is done when we switch to SVC - * (see safe_svcmode_maskall). + * various coprocessor accesses. */ @ Now install the hypervisor stub: adr r7, __hyp_stub_vectors @@ -156,7 +155,14 @@ THUMB( orr r7, #(1 << 30) ) @ HSCTLR.TE 1: #endif - bx lr @ The boot CPU mode is left in r4. + bic r7, r4, #MODE_MASK + orr r7, r7, #SVC_MODE +THUMB( orr r7, r7, #PSR_T_BIT ) + msr spsr_cxsf, r7 @ This is SPSR_hyp. + + __MSR_ELR_HYP(14) @ msr elr_hyp, lr + __ERET @ return, switching to SVC mode + @ The boot CPU mode is left in r4. ENDPROC(__hyp_stub_install_secondary) __hyp_stub_do_trap: @@ -194,7 +200,7 @@ ENDPROC(__hyp_get_vectors) @ fall through ENTRY(__hyp_set_vectors) __HVC(0) - mov pc, lr + bx lr ENDPROC(__hyp_set_vectors) #ifndef ZIMAGE diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c index 31e0eb353cd8..f9e8657dd241 100644 --- a/trunk/arch/arm/kernel/perf_event.c +++ b/trunk/arch/arm/kernel/perf_event.c @@ -149,6 +149,12 @@ u64 armpmu_event_update(struct perf_event *event) static void armpmu_read(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + armpmu_event_update(event); } @@ -201,6 +207,8 @@ armpmu_del(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + WARN_ON(idx < 0); + armpmu_stop(event, PERF_EF_UPDATE); hw_events->events[idx] = NULL; clear_bit(idx, hw_events->used_mask); @@ -350,7 +358,7 @@ __hw_perf_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; - int mapping; + int mapping, err; mapping = armpmu->map_event(event); @@ -399,12 +407,14 @@ __hw_perf_event_init(struct perf_event *event) local64_set(&hwc->period_left, hwc->sample_period); } + err = 0; if (event->group_leader != event) { - if (validate_group(event) != 0); + err = validate_group(event); + if (err) return -EINVAL; } - return 0; + return err; } static int armpmu_event_init(struct perf_event *event) diff --git a/trunk/arch/arm/kernel/perf_event_cpu.c b/trunk/arch/arm/kernel/perf_event_cpu.c index 1f2740e3dbc0..5f6620684e25 100644 --- a/trunk/arch/arm/kernel/perf_event_cpu.c +++ b/trunk/arch/arm/kernel/perf_event_cpu.c @@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->free_irq = cpu_pmu_free_irq; /* Ensure the PMU has sane values out of reset. 
*/ - if (cpu_pmu->reset) + if (cpu_pmu && cpu_pmu->reset) on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); } @@ -201,46 +201,48 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = { static int probe_current_pmu(struct arm_pmu *pmu) { int cpu = get_cpu(); - unsigned long implementor = read_cpuid_implementor(); - unsigned long part_number = read_cpuid_part_number(); + unsigned long cpuid = read_cpuid_id(); + unsigned long implementor = (cpuid & 0xFF000000) >> 24; + unsigned long part_number = (cpuid & 0xFFF0); int ret = -ENODEV; pr_info("probing PMU on CPU %d\n", cpu); /* ARM Ltd CPUs. */ - if (implementor == ARM_CPU_IMP_ARM) { + if (0x41 == implementor) { switch (part_number) { - case ARM_CPU_PART_ARM1136: - case ARM_CPU_PART_ARM1156: - case ARM_CPU_PART_ARM1176: + case 0xB360: /* ARM1136 */ + case 0xB560: /* ARM1156 */ + case 0xB760: /* ARM1176 */ ret = armv6pmu_init(pmu); break; - case ARM_CPU_PART_ARM11MPCORE: + case 0xB020: /* ARM11mpcore */ ret = armv6mpcore_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A8: + case 0xC080: /* Cortex-A8 */ ret = armv7_a8_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A9: + case 0xC090: /* Cortex-A9 */ ret = armv7_a9_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A5: + case 0xC050: /* Cortex-A5 */ ret = armv7_a5_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A15: + case 0xC0F0: /* Cortex-A15 */ ret = armv7_a15_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A7: + case 0xC070: /* Cortex-A7 */ ret = armv7_a7_pmu_init(pmu); break; } /* Intel CPUs [xscale]. */ - } else if (implementor == ARM_CPU_IMP_INTEL) { - switch (xscale_cpu_arch_version()) { - case ARM_CPU_XSCALE_ARCH_V1: + } else if (0x69 == implementor) { + part_number = (cpuid >> 13) & 0x7; + switch (part_number) { + case 1: ret = xscale1pmu_init(pmu); break; - case ARM_CPU_XSCALE_ARCH_V2: + case 2: ret = xscale2pmu_init(pmu); break; } @@ -277,22 +279,17 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) } if (ret) { - pr_info("failed to probe PMU!"); - goto out_free; + pr_info("failed to register PMU devices!"); + kfree(pmu); + return ret; } cpu_pmu = pmu; cpu_pmu->plat_device = pdev; cpu_pmu_init(cpu_pmu); - ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW); - - if (!ret) - return 0; + armpmu_register(cpu_pmu, PERF_TYPE_RAW); -out_free: - pr_info("failed to register PMU devices!"); - kfree(pmu); - return ret; + return 0; } static struct platform_driver cpu_pmu_driver = { diff --git a/trunk/arch/arm/kernel/perf_event_v6.c b/trunk/arch/arm/kernel/perf_event_v6.c index 03664b0e8fa4..041d0526a288 100644 --- a/trunk/arch/arm/kernel/perf_event_v6.c +++ b/trunk/arch/arm/kernel/perf_event_v6.c @@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/trunk/arch/arm/kernel/perf_event_v7.c b/trunk/arch/arm/kernel/perf_event_v7.c index 8c79a9e70b83..4fbc757d9cff 100644 --- a/trunk/arch/arm/kernel/perf_event_v7.c +++ b/trunk/arch/arm/kernel/perf_event_v7.c @@ -157,8 +157,8 @@ static const unsigned 
armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, /* * The prefetch counters don't differentiate between the I @@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/trunk/arch/arm/kernel/perf_event_xscale.c b/trunk/arch/arm/kernel/perf_event_xscale.c index 63990c42fac9..2b0fe30ec12e 100644 --- a/trunk/arch/arm/kernel/perf_event_xscale.c +++ b/trunk/arch/arm/kernel/perf_event_xscale.c @@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c index b7e3b506219b..84f4cbf652e5 100644 --- a/trunk/arch/arm/kernel/smp.c +++ b/trunk/arch/arm/kernel/smp.c @@ -475,8 +475,14 @@ u64 smp_irq_stat_cpu(unsigned int cpu) */ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent); +static void ipi_timer(void) +{ + struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent); + evt->event_handler(evt); +} + #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST -void tick_broadcast(const struct cpumask *mask) +static void smp_timer_broadcast(const struct cpumask *mask) { smp_cross_call(mask, IPI_TIMER); } @@ -524,6 +530,7 @@ static void __cpuinit percpu_timer_setup(void) struct clock_event_device *evt = &per_cpu(percpu_clockevent, cpu); evt->cpumask = cpumask_of(cpu); + evt->broadcast = smp_timer_broadcast; if (!lt_ops || lt_ops->setup(evt)) broadcast_timer_setup(evt); @@ -589,13 +596,11 @@ void handle_IPI(int ipinr, struct pt_regs *regs) case IPI_WAKEUP: break; -#ifdef 
CONFIG_GENERIC_CLOCKEVENTS_BROADCAST case IPI_TIMER: irq_enter(); - tick_receive_broadcast(); + ipi_timer(); irq_exit(); break; -#endif case IPI_RESCHEDULE: scheduler_ipi(); diff --git a/trunk/arch/arm/kernel/vmlinux.lds.S b/trunk/arch/arm/kernel/vmlinux.lds.S index b571484e9f03..11c1785bf63e 100644 --- a/trunk/arch/arm/kernel/vmlinux.lds.S +++ b/trunk/arch/arm/kernel/vmlinux.lds.S @@ -19,11 +19,7 @@ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__idmap_text_start) = .; \ *(.idmap.text) \ - VMLINUX_SYMBOL(__idmap_text_end) = .; \ - ALIGN_FUNCTION(); \ - VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ - *(.hyp.idmap.text) \ - VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; + VMLINUX_SYMBOL(__idmap_text_end) = .; #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x) diff --git a/trunk/arch/arm/kvm/Kconfig b/trunk/arch/arm/kvm/Kconfig deleted file mode 100644 index 05227cb57a7b..000000000000 --- a/trunk/arch/arm/kvm/Kconfig +++ /dev/null @@ -1,56 +0,0 @@ -# -# KVM configuration -# - -source "virt/kvm/Kconfig" - -menuconfig VIRTUALIZATION - bool "Virtualization" - ---help--- - Say Y here to get to see options for using your Linux host to run - other operating systems inside virtual machines (guests). - This option alone does not add any kernel code. - - If you say N, all options in this submenu will be skipped and - disabled. - -if VIRTUALIZATION - -config KVM - bool "Kernel-based Virtual Machine (KVM) support" - select PREEMPT_NOTIFIERS - select ANON_INODES - select KVM_MMIO - select KVM_ARM_HOST - depends on ARM_VIRT_EXT && ARM_LPAE - ---help--- - Support hosting virtualized guest machines. You will also - need to select one or more of the processor modules below. - - This module provides access to the hardware capabilities through - a character device node named /dev/kvm. - - If unsure, say N. - -config KVM_ARM_HOST - bool "KVM host support for ARM cpus." - depends on KVM - depends on MMU - select MMU_NOTIFIER - ---help--- - Provides host support for ARM processors. - -config KVM_ARM_MAX_VCPUS - int "Number maximum supported virtual CPUs per VM" - depends on KVM_ARM_HOST - default 4 - help - Static number of max supported virtual CPUs per VM. - - If you choose a high number, the vcpu structures will be quite - large, so only choose a reasonable number that you expect to - actually use. - -source drivers/virtio/Kconfig - -endif # VIRTUALIZATION diff --git a/trunk/arch/arm/kvm/Makefile b/trunk/arch/arm/kvm/Makefile deleted file mode 100644 index ea27987bd07f..000000000000 --- a/trunk/arch/arm/kvm/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# -# Makefile for Kernel-based Virtual Machine module -# - -plus_virt := $(call as-instr,.arch_extension virt,+virt) -ifeq ($(plus_virt),+virt) - plus_virt_def := -DREQUIRES_VIRT=1 -endif - -ccflags-y += -Ivirt/kvm -Iarch/arm/kvm -CFLAGS_arm.o := -I. $(plus_virt_def) -CFLAGS_mmu.o := -I. 
- -AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) -AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) - -kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) - -obj-y += kvm-arm.o init.o interrupts.o -obj-y += arm.o guest.o mmu.o emulate.o reset.o -obj-y += coproc.o coproc_a15.o mmio.o psci.o diff --git a/trunk/arch/arm/kvm/arm.c b/trunk/arch/arm/kvm/arm.c deleted file mode 100644 index 2d30e3afdaf9..000000000000 --- a/trunk/arch/arm/kvm/arm.c +++ /dev/null @@ -1,1015 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CREATE_TRACE_POINTS -#include "trace.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef REQUIRES_VIRT -__asm__(".arch_extension virt"); -#endif - -static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); -static struct vfp_hard_struct __percpu *kvm_host_vfp_state; -static unsigned long hyp_default_vectors; - -/* The VMID used in the VTTBR */ -static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); -static u8 kvm_next_vmid; -static DEFINE_SPINLOCK(kvm_vmid_lock); - -int kvm_arch_hardware_enable(void *garbage) -{ - return 0; -} - -int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) -{ - return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; -} - -void kvm_arch_hardware_disable(void *garbage) -{ -} - -int kvm_arch_hardware_setup(void) -{ - return 0; -} - -void kvm_arch_hardware_unsetup(void) -{ -} - -void kvm_arch_check_processor_compat(void *rtn) -{ - *(int *)rtn = 0; -} - -void kvm_arch_sync_events(struct kvm *kvm) -{ -} - -/** - * kvm_arch_init_vm - initializes a VM data structure - * @kvm: pointer to the KVM struct - */ -int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) -{ - int ret = 0; - - if (type) - return -EINVAL; - - ret = kvm_alloc_stage2_pgd(kvm); - if (ret) - goto out_fail_alloc; - - ret = create_hyp_mappings(kvm, kvm + 1); - if (ret) - goto out_free_stage2_pgd; - - /* Mark the initial VMID generation invalid */ - kvm->arch.vmid_gen = 0; - - return ret; -out_free_stage2_pgd: - kvm_free_stage2_pgd(kvm); -out_fail_alloc: - return ret; -} - -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) -{ - return VM_FAULT_SIGBUS; -} - -void kvm_arch_free_memslot(struct kvm_memory_slot *free, - struct kvm_memory_slot *dont) -{ -} - -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) -{ - return 0; -} - -/** - * kvm_arch_destroy_vm - destroy the VM data structure - * @kvm: pointer to the KVM struct - */ -void kvm_arch_destroy_vm(struct kvm *kvm) -{ - int i; - - kvm_free_stage2_pgd(kvm); - - for (i = 0; i < KVM_MAX_VCPUS; ++i) { - if (kvm->vcpus[i]) { - 
kvm_arch_vcpu_free(kvm->vcpus[i]); - kvm->vcpus[i] = NULL; - } - } -} - -int kvm_dev_ioctl_check_extension(long ext) -{ - int r; - switch (ext) { - case KVM_CAP_USER_MEMORY: - case KVM_CAP_SYNC_MMU: - case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: - case KVM_CAP_ONE_REG: - case KVM_CAP_ARM_PSCI: - r = 1; - break; - case KVM_CAP_COALESCED_MMIO: - r = KVM_COALESCED_MMIO_PAGE_OFFSET; - break; - case KVM_CAP_NR_VCPUS: - r = num_online_cpus(); - break; - case KVM_CAP_MAX_VCPUS: - r = KVM_MAX_VCPUS; - break; - default: - r = 0; - break; - } - return r; -} - -long kvm_arch_dev_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - return -EINVAL; -} - -int kvm_arch_set_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot old, - int user_alloc) -{ - return 0; -} - -int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_memory_slot old, - struct kvm_userspace_memory_region *mem, - int user_alloc) -{ - return 0; -} - -void kvm_arch_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot old, - int user_alloc) -{ -} - -void kvm_arch_flush_shadow_all(struct kvm *kvm) -{ -} - -void kvm_arch_flush_shadow_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ -} - -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) -{ - int err; - struct kvm_vcpu *vcpu; - - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); - if (!vcpu) { - err = -ENOMEM; - goto out; - } - - err = kvm_vcpu_init(vcpu, kvm, id); - if (err) - goto free_vcpu; - - err = create_hyp_mappings(vcpu, vcpu + 1); - if (err) - goto vcpu_uninit; - - return vcpu; -vcpu_uninit: - kvm_vcpu_uninit(vcpu); -free_vcpu: - kmem_cache_free(kvm_vcpu_cache, vcpu); -out: - return ERR_PTR(err); -} - -int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) -{ - return 0; -} - -void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) -{ - kvm_mmu_free_memory_caches(vcpu); - kmem_cache_free(kvm_vcpu_cache, vcpu); -} - -void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) -{ - kvm_arch_vcpu_free(vcpu); -} - -int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) -{ - return 0; -} - -int __attribute_const__ kvm_target_cpu(void) -{ - unsigned long implementor = read_cpuid_implementor(); - unsigned long part_number = read_cpuid_part_number(); - - if (implementor != ARM_CPU_IMP_ARM) - return -EINVAL; - - switch (part_number) { - case ARM_CPU_PART_CORTEX_A15: - return KVM_ARM_TARGET_CORTEX_A15; - default: - return -EINVAL; - } -} - -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - /* Force users to call KVM_ARM_VCPU_INIT */ - vcpu->arch.target = -1; - return 0; -} - -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ -} - -void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) -{ - vcpu->cpu = cpu; - vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state); - - /* - * Check whether this vcpu requires the cache to be flushed on - * this physical CPU. This is a consequence of doing dcache - * operations by set/way on this vcpu. We do it here to be in - * a non-preemptible section. 
- */ - if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) - flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ -} - -void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) -{ -} - -int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg) -{ - return -EINVAL; -} - - -int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) -{ - return -EINVAL; -} - -/** - * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled - * @v: The VCPU pointer - * - * If the guest CPU is not waiting for interrupts or an interrupt line is - * asserted, the CPU is by definition runnable. - */ -int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) -{ - return !!v->arch.irq_lines; -} - -/* Just ensure a guest exit from a particular CPU */ -static void exit_vm_noop(void *info) -{ -} - -void force_vm_exit(const cpumask_t *mask) -{ - smp_call_function_many(mask, exit_vm_noop, NULL, true); -} - -/** - * need_new_vmid_gen - check that the VMID is still valid - * @kvm: The VM's VMID to check - * - * return true if there is a new generation of VMIDs being used - * - * The hardware supports only 256 values with the value zero reserved for the - * host, so we check if an assigned value belongs to a previous generation, - * which requires us to assign a new value. If we're the first to use a - * VMID for the new generation, we must flush necessary caches and TLBs on all - * CPUs. - */ -static bool need_new_vmid_gen(struct kvm *kvm) -{ - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); -} - -/** - * update_vttbr - Update the VTTBR with a valid VMID before the guest runs - * @kvm: The guest that we are about to run - * - * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the - * VM has a valid VMID, otherwise assigns a new one and flushes corresponding - * caches and TLBs. - */ -static void update_vttbr(struct kvm *kvm) -{ - phys_addr_t pgd_phys; - u64 vmid; - - if (!need_new_vmid_gen(kvm)) - return; - - spin_lock(&kvm_vmid_lock); - - /* - * We need to re-check the vmid_gen here to ensure that if another vcpu - * already allocated a valid vmid for this vm, then this vcpu should - * use the same vmid. - */ - if (!need_new_vmid_gen(kvm)) { - spin_unlock(&kvm_vmid_lock); - return; - } - - /* First user of a new VMID generation? */ - if (unlikely(kvm_next_vmid == 0)) { - atomic64_inc(&kvm_vmid_gen); - kvm_next_vmid = 1; - - /* - * On SMP we know no other CPUs can use this CPU's or each - * other's VMID after force_vm_exit returns since the - * kvm_vmid_lock blocks them from reentry to the guest. - */ - force_vm_exit(cpu_all_mask); - /* - * Now broadcast TLB + ICACHE invalidation over the inner - * shareable domain to make sure all data structures are - * clean.
- */ - kvm_call_hyp(__kvm_flush_vm_context); - } - - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); - kvm->arch.vmid = kvm_next_vmid; - kvm_next_vmid++; - - /* update vttbr to be used with the new vmid */ - pgd_phys = virt_to_phys(kvm->arch.pgd); - vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; - kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK; - kvm->arch.vttbr |= vmid; - - spin_unlock(&kvm_vmid_lock); -} - -static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* SVC called from Hyp mode should never get here */ - kvm_debug("SVC called from Hyp mode shouldn't go here\n"); - BUG(); - return -EINVAL; /* Squash warning */ -} - -static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), - vcpu->arch.hsr & HSR_HVC_IMM_MASK); - - if (kvm_psci_call(vcpu)) - return 1; - - kvm_inject_undefined(vcpu); - return 1; -} - -static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - if (kvm_psci_call(vcpu)) - return 1; - - kvm_inject_undefined(vcpu); - return 1; -} - -static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* The hypervisor should never cause aborts */ - kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", - vcpu->arch.hxfar, vcpu->arch.hsr); - return -EFAULT; -} - -static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* This is either an error in the ws. code or an external abort */ - kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", - vcpu->arch.hxfar, vcpu->arch.hsr); - return -EFAULT; -} - -typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); -static exit_handle_fn arm_exit_handlers[] = { - [HSR_EC_WFI] = kvm_handle_wfi, - [HSR_EC_CP15_32] = kvm_handle_cp15_32, - [HSR_EC_CP15_64] = kvm_handle_cp15_64, - [HSR_EC_CP14_MR] = kvm_handle_cp14_access, - [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, - [HSR_EC_CP14_64] = kvm_handle_cp14_access, - [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, - [HSR_EC_CP10_ID] = kvm_handle_cp10_id, - [HSR_EC_SVC_HYP] = handle_svc_hyp, - [HSR_EC_HVC] = handle_hvc, - [HSR_EC_SMC] = handle_smc, - [HSR_EC_IABT] = kvm_handle_guest_abort, - [HSR_EC_IABT_HYP] = handle_pabt_hyp, - [HSR_EC_DABT] = kvm_handle_guest_abort, - [HSR_EC_DABT_HYP] = handle_dabt_hyp, -}; - -/* - * A conditional instruction is allowed to trap, even though it - * wouldn't be executed. So let's re-implement the hardware, in - * software! - */ -static bool kvm_condition_valid(struct kvm_vcpu *vcpu) -{ - unsigned long cpsr, cond, insn; - - /* - * Exception Code 0 can only happen if we set HCR.TGE to 1, to - * catch undefined instructions, and then we won't get past - * the arm_exit_handlers test anyway. - */ - BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0); - - /* Top two bits non-zero? Unconditional. */ - if (vcpu->arch.hsr >> 30) - return true; - - cpsr = *vcpu_cpsr(vcpu); - - /* Is condition field valid? */ - if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT) - cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT; - else { - /* This can happen in Thumb mode: examine IT state. */ - unsigned long it; - - it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); - - /* it == 0 => unconditional. */ - if (it == 0) - return true; - - /* The cond for this insn works out as the top 4 bits. 
*/ - cond = (it >> 4); - } - - /* Shift makes it look like an ARM-mode instruction */ - insn = cond << 28; - return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; -} - -/* - * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on - * proper exit to QEMU. - */ -static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, - int exception_index) -{ - unsigned long hsr_ec; - - switch (exception_index) { - case ARM_EXCEPTION_IRQ: - return 1; - case ARM_EXCEPTION_UNDEFINED: - kvm_err("Undefined exception in Hyp mode at: %#08x\n", - vcpu->arch.hyp_pc); - BUG(); - panic("KVM: Hypervisor undefined exception!\n"); - case ARM_EXCEPTION_DATA_ABORT: - case ARM_EXCEPTION_PREF_ABORT: - case ARM_EXCEPTION_HVC: - hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; - - if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) - || !arm_exit_handlers[hsr_ec]) { - kvm_err("Unknown exception class: %#08lx, " - "hsr: %#08x\n", hsr_ec, - (unsigned int)vcpu->arch.hsr); - BUG(); - } - - /* - * See ARM ARM B1.14.1: "Hyp traps on instructions - * that fail their condition code check" - */ - if (!kvm_condition_valid(vcpu)) { - bool is_wide = vcpu->arch.hsr & HSR_IL; - kvm_skip_instr(vcpu, is_wide); - return 1; - } - - return arm_exit_handlers[hsr_ec](vcpu, run); - default: - kvm_pr_unimpl("Unsupported exception type: %d", - exception_index); - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return 0; - } -} - -static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) -{ - if (likely(vcpu->arch.has_run_once)) - return 0; - - vcpu->arch.has_run_once = true; - - /* - * Handle the "start in power-off" case by calling into the - * PSCI code. - */ - if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) { - *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF; - kvm_psci_call(vcpu); - } - - return 0; -} - -static void vcpu_pause(struct kvm_vcpu *vcpu) -{ - wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); - - wait_event_interruptible(*wq, !vcpu->arch.pause); -} - -/** - * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code - * @vcpu: The VCPU pointer - * @run: The kvm_run structure pointer used for userspace state exchange - * - * This function is called through the VCPU_RUN ioctl called from user space. It - * will execute VM code in a loop until the time slice for the process is used - * or some emulation is needed from user space in which case the function will - * return with return value 0 and with the kvm_run structure filled in with the - * required data for the requested emulation.
- */ -int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - int ret; - sigset_t sigsaved; - - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) - return -ENOEXEC; - - ret = kvm_vcpu_first_run_init(vcpu); - if (ret) - return ret; - - if (run->exit_reason == KVM_EXIT_MMIO) { - ret = kvm_handle_mmio_return(vcpu, vcpu->run); - if (ret) - return ret; - } - - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); - - ret = 1; - run->exit_reason = KVM_EXIT_UNKNOWN; - while (ret > 0) { - /* - * Check conditions before entering the guest - */ - cond_resched(); - - update_vttbr(vcpu->kvm); - - if (vcpu->arch.pause) - vcpu_pause(vcpu); - - local_irq_disable(); - - /* - * Re-check atomic conditions - */ - if (signal_pending(current)) { - ret = -EINTR; - run->exit_reason = KVM_EXIT_INTR; - } - - if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { - local_irq_enable(); - continue; - } - - /************************************************************** - * Enter the guest - */ - trace_kvm_entry(*vcpu_pc(vcpu)); - kvm_guest_enter(); - vcpu->mode = IN_GUEST_MODE; - - ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); - - vcpu->mode = OUTSIDE_GUEST_MODE; - vcpu->arch.last_pcpu = smp_processor_id(); - kvm_guest_exit(); - trace_kvm_exit(*vcpu_pc(vcpu)); - /* - * We may have taken a host interrupt in HYP mode (ie - * while executing the guest). This interrupt is still - * pending, as we haven't serviced it yet! - * - * We're now back in SVC mode, with interrupts - * disabled. Enabling the interrupts now will have - * the effect of taking the interrupt again, in SVC - * mode this time. - */ - local_irq_enable(); - - /* - * Back from guest - *************************************************************/ - - ret = handle_exit(vcpu, run, ret); - } - - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); - return ret; -} - -static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) -{ - int bit_index; - bool set; - unsigned long *ptr; - - if (number == KVM_ARM_IRQ_CPU_IRQ) - bit_index = __ffs(HCR_VI); - else /* KVM_ARM_IRQ_CPU_FIQ */ - bit_index = __ffs(HCR_VF); - - ptr = (unsigned long *)&vcpu->arch.irq_lines; - if (level) - set = test_and_set_bit(bit_index, ptr); - else - set = test_and_clear_bit(bit_index, ptr); - - /* - * If we didn't change anything, no need to wake up or kick other CPUs - */ - if (set == level) - return 0; - - /* - * The vcpu irq_lines field was updated, wake up sleeping VCPUs and - * trigger a world-switch round on the running physical CPU to set the - * virtual IRQ/FIQ fields in the HCR appropriately. 
- */ - kvm_vcpu_kick(vcpu); - - return 0; -} - -int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level) -{ - u32 irq = irq_level->irq; - unsigned int irq_type, vcpu_idx, irq_num; - int nrcpus = atomic_read(&kvm->online_vcpus); - struct kvm_vcpu *vcpu = NULL; - bool level = irq_level->level; - - irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; - vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; - irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; - - trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); - - if (irq_type != KVM_ARM_IRQ_TYPE_CPU) - return -EINVAL; - - if (vcpu_idx >= nrcpus) - return -EINVAL; - - vcpu = kvm_get_vcpu(kvm, vcpu_idx); - if (!vcpu) - return -EINVAL; - - if (irq_num > KVM_ARM_IRQ_CPU_FIQ) - return -EINVAL; - - return vcpu_interrupt_line(vcpu, irq_num, level); -} - -long kvm_arch_vcpu_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - struct kvm_vcpu *vcpu = filp->private_data; - void __user *argp = (void __user *)arg; - - switch (ioctl) { - case KVM_ARM_VCPU_INIT: { - struct kvm_vcpu_init init; - - if (copy_from_user(&init, argp, sizeof(init))) - return -EFAULT; - - return kvm_vcpu_set_target(vcpu, &init); - - } - case KVM_SET_ONE_REG: - case KVM_GET_ONE_REG: { - struct kvm_one_reg reg; - if (copy_from_user(&reg, argp, sizeof(reg))) - return -EFAULT; - if (ioctl == KVM_SET_ONE_REG) - return kvm_arm_set_reg(vcpu, &reg); - else - return kvm_arm_get_reg(vcpu, &reg); - } - case KVM_GET_REG_LIST: { - struct kvm_reg_list __user *user_list = argp; - struct kvm_reg_list reg_list; - unsigned n; - - if (copy_from_user(&reg_list, user_list, sizeof(reg_list))) - return -EFAULT; - n = reg_list.n; - reg_list.n = kvm_arm_num_regs(vcpu); - if (copy_to_user(user_list, &reg_list, sizeof(reg_list))) - return -EFAULT; - if (n < reg_list.n) - return -E2BIG; - return kvm_arm_copy_reg_indices(vcpu, user_list->reg); - } - default: - return -EINVAL; - } -} - -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) -{ - return -EINVAL; -} - -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - return -EINVAL; -} - -static void cpu_init_hyp_mode(void *vector) -{ - unsigned long long pgd_ptr; - unsigned long pgd_low, pgd_high; - unsigned long hyp_stack_ptr; - unsigned long stack_page; - unsigned long vector_ptr; - - /* Switch from the HYP stub to our own HYP init vector */ - __hyp_set_vectors((unsigned long)vector); - - pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); - pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); - pgd_high = (pgd_ptr >> 32ULL); - stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); - hyp_stack_ptr = stack_page + PAGE_SIZE; - vector_ptr = (unsigned long)__kvm_hyp_vector; - - /* - * Call initialization code, and switch to the full blown - * HYP code. The init code doesn't need to preserve these registers as - * r1-r3 and r12 are already callee save according to the AAPCS. - * Note that we slightly misuse the prototype by casting the pgd_low to - * a void *. - */ - kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); -} - -/** - * Inits Hyp-mode on all online CPUs - */ -static int init_hyp_mode(void) -{ - phys_addr_t init_phys_addr; - int cpu; - int err = 0; - - /* - * Allocate Hyp PGD and setup Hyp identity mapping - */ - err = kvm_mmu_init(); - if (err) - goto out_err; - - /* - * It is probably enough to obtain the default on one - * CPU. It's unlikely to be different on the others.
- */ - hyp_default_vectors = __hyp_get_vectors(); - - /* - * Allocate stack pages for Hypervisor-mode - */ - for_each_possible_cpu(cpu) { - unsigned long stack_page; - - stack_page = __get_free_page(GFP_KERNEL); - if (!stack_page) { - err = -ENOMEM; - goto out_free_stack_pages; - } - - per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; - } - - /* - * Execute the init code on each CPU. - * - * Note: The stack is not mapped yet, so don't do anything else than - * initializing the hypervisor mode on each CPU using a local stack - * space for temporary storage. - */ - init_phys_addr = virt_to_phys(__kvm_hyp_init); - for_each_online_cpu(cpu) { - smp_call_function_single(cpu, cpu_init_hyp_mode, - (void *)(long)init_phys_addr, 1); - } - - /* - * Unmap the identity mapping - */ - kvm_clear_hyp_idmap(); - - /* - * Map the Hyp-code called directly from the host - */ - err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); - if (err) { - kvm_err("Cannot map world-switch code\n"); - goto out_free_mappings; - } - - /* - * Map the Hyp stack pages - */ - for_each_possible_cpu(cpu) { - char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); - err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE); - - if (err) { - kvm_err("Cannot map hyp stack\n"); - goto out_free_mappings; - } - } - - /* - * Map the host VFP structures - */ - kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct); - if (!kvm_host_vfp_state) { - err = -ENOMEM; - kvm_err("Cannot allocate host VFP state\n"); - goto out_free_mappings; - } - - for_each_possible_cpu(cpu) { - struct vfp_hard_struct *vfp; - - vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); - err = create_hyp_mappings(vfp, vfp + 1); - - if (err) { - kvm_err("Cannot map host VFP state: %d\n", err); - goto out_free_vfp; - } - } - - kvm_info("Hyp mode initialized successfully\n"); - return 0; -out_free_vfp: - free_percpu(kvm_host_vfp_state); -out_free_mappings: - free_hyp_pmds(); -out_free_stack_pages: - for_each_possible_cpu(cpu) - free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); -out_err: - kvm_err("error initializing Hyp mode: %d\n", err); - return err; -} - -/** - * Initialize Hyp-mode and memory mappings on all CPUs. - */ -int kvm_arch_init(void *opaque) -{ - int err; - - if (!is_hyp_mode_available()) { - kvm_err("HYP mode not available\n"); - return -ENODEV; - } - - if (kvm_target_cpu() < 0) { - kvm_err("Target CPU not supported!\n"); - return -ENODEV; - } - - err = init_hyp_mode(); - if (err) - goto out_err; - - kvm_coproc_table_init(); - return 0; -out_err: - return err; -} - -/* NOP: Compiling as a module not supported */ -void kvm_arch_exit(void) -{ -} - -static int arm_init(void) -{ - int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); - return rc; -} - -module_init(arm_init); diff --git a/trunk/arch/arm/kvm/coproc.c b/trunk/arch/arm/kvm/coproc.c deleted file mode 100644 index d782638c7ec0..000000000000 --- a/trunk/arch/arm/kvm/coproc.c +++ /dev/null @@ -1,1046 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Rusty Russell - * Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../vfp/vfpinstr.h" - -#include "trace.h" -#include "coproc.h" - - -/****************************************************************************** - * Co-processor emulation - *****************************************************************************/ - -/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ -static u32 cache_levels; - -/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ -#define CSSELR_MAX 12 - -int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_inject_undefined(vcpu); - return 1; -} - -int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* - * We can get here, if the host has been built without VFPv3 support, - * but the guest attempted a floating point operation. - */ - kvm_inject_undefined(vcpu); - return 1; -} - -int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_inject_undefined(vcpu); - return 1; -} - -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_inject_undefined(vcpu); - return 1; -} - -/* See note at ARM ARM B1.14.4 */ -static bool access_dcsw(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - u32 val; - int cpu; - - cpu = get_cpu(); - - if (!p->is_write) - return read_from_write_only(vcpu, p); - - cpumask_setall(&vcpu->arch.require_dcache_flush); - cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); - - /* If we were already preempted, take the long way around */ - if (cpu != vcpu->arch.last_pcpu) { - flush_cache_all(); - goto done; - } - - val = *vcpu_reg(vcpu, p->Rt1); - - switch (p->CRm) { - case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ - case 14: /* DCCISW */ - asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); - break; - - case 10: /* DCCSW */ - asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); - break; - } - -done: - put_cpu(); - - return true; -} - -/* - * We could trap ID_DFR0 and tell the guest we don't support performance - * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was - * NAKed, so it will read the PMCR anyway. - * - * Therefore we tell the guest we have 0 counters. Unfortunately, we - * must always support PMCCNTR (the cycle counter): we just RAZ/WI for - * all PM registers, which doesn't crash the guest kernel at least. - */ -static bool pm_fake(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - else - return read_zero(vcpu, p); -} - -#define access_pmcr pm_fake -#define access_pmcntenset pm_fake -#define access_pmcntenclr pm_fake -#define access_pmovsr pm_fake -#define access_pmselr pm_fake -#define access_pmceid0 pm_fake -#define access_pmceid1 pm_fake -#define access_pmccntr pm_fake -#define access_pmxevtyper pm_fake -#define access_pmxevcntr pm_fake -#define access_pmuserenr pm_fake -#define access_pmintenset pm_fake -#define access_pmintenclr pm_fake - -/* Architected CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 - */ -static const struct coproc_reg cp15_regs[] = { - /* CSSELR: swapped by interrupt.S. 
*/ - { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, - NULL, reset_unknown, c0_CSSELR }, - - /* TTBR0/TTBR1: swapped by interrupt.S. */ - { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, - { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, - - /* TTBCR: swapped by interrupt.S. */ - { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_val, c2_TTBCR, 0x00000000 }, - - /* DACR: swapped by interrupt.S. */ - { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c3_DACR }, - - /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ - { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c5_DFSR }, - { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c5_IFSR }, - { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c5_ADFSR }, - { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c5_AIFSR }, - - /* DFAR/IFAR: swapped by interrupt.S. */ - { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c6_DFAR }, - { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_unknown, c6_IFAR }, - /* - * DC{C,I,CI}SW operations: - */ - { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, - { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, - { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, - /* - * Dummy performance monitor implementation. - */ - { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, - { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, - { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, - { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, - { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, - { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, - { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, - { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, - { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, - - /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ - { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c10_PRRR}, - { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c10_NMRR}, - - /* VBAR: swapped by interrupt.S. */ - { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_val, c12_VBAR, 0x00000000 }, - - /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */ - { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, - NULL, reset_val, c13_CID, 0x00000000 }, - { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_unknown, c13_TID_URW }, - { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, - NULL, reset_unknown, c13_TID_URO }, - { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, - NULL, reset_unknown, c13_TID_PRIV }, -}; - -/* Target specific emulation tables */ -static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; - -void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) -{ - target_tables[table->target] = table; -} - -/* Get specific register table for this target. 
*/
-static const struct coproc_reg *get_target_table(unsigned target, size_t *num)
-{
- struct kvm_coproc_target_table *table;
-
- table = target_tables[target];
- *num = table->num;
- return table->table;
-}
-
-static const struct coproc_reg *find_reg(const struct coproc_params *params,
- const struct coproc_reg table[],
- unsigned int num)
-{
- unsigned int i;
-
- for (i = 0; i < num; i++) {
- const struct coproc_reg *r = &table[i];
-
- if (params->is_64bit != r->is_64)
- continue;
- if (params->CRn != r->CRn)
- continue;
- if (params->CRm != r->CRm)
- continue;
- if (params->Op1 != r->Op1)
- continue;
- if (params->Op2 != r->Op2)
- continue;
-
- return r;
- }
- return NULL;
-}
-
-static int emulate_cp15(struct kvm_vcpu *vcpu,
- const struct coproc_params *params)
-{
- size_t num;
- const struct coproc_reg *table, *r;
-
- trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn,
- params->CRm, params->Op2, params->is_write);
-
- table = get_target_table(vcpu->arch.target, &num);
-
- /* Search target-specific then generic table. */
- r = find_reg(params, table, num);
- if (!r)
- r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));
-
- if (likely(r)) {
- /* If we don't have an accessor, we should never get here! */
- BUG_ON(!r->access);
-
- if (likely(r->access(vcpu, params, r))) {
- /* Skip instruction, since it was emulated */
- kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
- return 1;
- }
- /* If access function fails, it should complain. */
- } else {
- kvm_err("Unsupported guest CP15 access at: %08x\n",
- *vcpu_pc(vcpu));
- print_cp_instr(params);
- }
- kvm_inject_undefined(vcpu);
- return 1;
-}
-
-/**
- * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- struct coproc_params params;
-
- params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
- params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
- params.is_write = ((vcpu->arch.hsr & 1) == 0);
- params.is_64bit = true;
-
- params.Op1 = (vcpu->arch.hsr >> 16) & 0xf;
- params.Op2 = 0;
- params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf;
- params.CRn = 0;
-
- return emulate_cp15(vcpu, &params);
-}
-
-static void reset_coproc_regs(struct kvm_vcpu *vcpu,
- const struct coproc_reg *table, size_t num)
-{
- unsigned long i;
-
- for (i = 0; i < num; i++)
- if (table[i].reset)
- table[i].reset(vcpu, &table[i]);
-}
-
-/**
- * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
- * @vcpu: The VCPU pointer
- * @run: The kvm_run struct
- */
-int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
-{
- struct coproc_params params;
-
- params.CRm = (vcpu->arch.hsr >> 1) & 0xf;
- params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf;
- params.is_write = ((vcpu->arch.hsr & 1) == 0);
- params.is_64bit = false;
-
- params.CRn = (vcpu->arch.hsr >> 10) & 0xf;
- params.Op1 = (vcpu->arch.hsr >> 14) & 0x7;
- params.Op2 = (vcpu->arch.hsr >> 17) & 0x7;
- params.Rt2 = 0;
-
- return emulate_cp15(vcpu, &params);
-}
-
-/******************************************************************************
- * Userspace API
- *****************************************************************************/
-
-static bool index_to_params(u64 id, struct coproc_params *params)
-{
- switch (id & KVM_REG_SIZE_MASK) {
- case KVM_REG_SIZE_U32:
- /* Any unused index bits means it's not valid. 
*/
- if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
- | KVM_REG_ARM_COPROC_MASK
- | KVM_REG_ARM_32_CRN_MASK
- | KVM_REG_ARM_CRM_MASK
- | KVM_REG_ARM_OPC1_MASK
- | KVM_REG_ARM_32_OPC2_MASK))
- return false;
-
- params->is_64bit = false;
- params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK)
- >> KVM_REG_ARM_32_CRN_SHIFT);
- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
- >> KVM_REG_ARM_CRM_SHIFT);
- params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
- >> KVM_REG_ARM_OPC1_SHIFT);
- params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK)
- >> KVM_REG_ARM_32_OPC2_SHIFT);
- return true;
- case KVM_REG_SIZE_U64:
- /* Any unused index bits means it's not valid. */
- if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
- | KVM_REG_ARM_COPROC_MASK
- | KVM_REG_ARM_CRM_MASK
- | KVM_REG_ARM_OPC1_MASK))
- return false;
- params->is_64bit = true;
- params->CRm = ((id & KVM_REG_ARM_CRM_MASK)
- >> KVM_REG_ARM_CRM_SHIFT);
- params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK)
- >> KVM_REG_ARM_OPC1_SHIFT);
- params->Op2 = 0;
- params->CRn = 0;
- return true;
- default:
- return false;
- }
-}
-
-/* Decode an index value, and find the cp15 coproc_reg entry. */
-static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu,
- u64 id)
-{
- size_t num;
- const struct coproc_reg *table, *r;
- struct coproc_params params;
-
- /* We only do cp15 for now. */
- if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15)
- return NULL;
-
- if (!index_to_params(id, &params))
- return NULL;
-
- table = get_target_table(vcpu->arch.target, &num);
- r = find_reg(&params, table, num);
- if (!r)
- r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs));
-
- /* Not saved in the cp15 array? */
- if (r && !r->reg)
- r = NULL;
-
- return r;
-}
-
-/*
- * These are the invariant cp15 registers: we let the guest see the host
- * versions of these, so they're part of the guest state.
- *
- * A future CPU may provide a mechanism to present different values to
- * the guest, or a future kvm may trap them.
- */
-/* Unfortunately, there's no register-argument for mrc, so generate. 
*/ -#define FUNCTION_FOR32(crn, crm, op1, op2, name) \ - static void get_##name(struct kvm_vcpu *v, \ - const struct coproc_reg *r) \ - { \ - u32 val; \ - \ - asm volatile("mrc p15, " __stringify(op1) \ - ", %0, c" __stringify(crn) \ - ", c" __stringify(crm) \ - ", " __stringify(op2) "\n" : "=r" (val)); \ - ((struct coproc_reg *)r)->val = val; \ - } - -FUNCTION_FOR32(0, 0, 0, 0, MIDR) -FUNCTION_FOR32(0, 0, 0, 1, CTR) -FUNCTION_FOR32(0, 0, 0, 2, TCMTR) -FUNCTION_FOR32(0, 0, 0, 3, TLBTR) -FUNCTION_FOR32(0, 0, 0, 6, REVIDR) -FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) -FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) -FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) -FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) -FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) -FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) -FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) -FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) -FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) -FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) -FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) -FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) -FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) -FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) -FUNCTION_FOR32(0, 0, 1, 1, CLIDR) -FUNCTION_FOR32(0, 0, 1, 7, AIDR) - -/* ->val is filled in by kvm_invariant_coproc_table_init() */ -static struct coproc_reg invariant_cp15[] = { - { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, - - { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 }, - - { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, - - { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, - { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, -}; - -static int reg_from_user(void *val, const void __user *uaddr, u64 id) -{ - /* This Just Works because we are little endian. */ - if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) - return -EFAULT; - return 0; -} - -static int reg_to_user(void __user *uaddr, const void *val, u64 id) -{ - /* This Just Works because we are little endian. 
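
For reference, each FUNCTION_FOR32() invocation above generates one of these readers mechanically; FUNCTION_FOR32(0, 0, 0, 0, MIDR), for instance, expands to roughly the following accessor (a sketch with __stringify() applied by hand):

    static void get_MIDR(struct kvm_vcpu *v, const struct coproc_reg *r)
    {
            u32 val;

            /* MRC p15, 0, <Rt>, c0, c0, 0 reads the host MIDR. */
            asm volatile("mrc p15, 0, %0, c0, c0, 0\n" : "=r" (val));
            ((struct coproc_reg *)r)->val = val;    /* cache the host value */
    }
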
*/
- if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
- return -EFAULT;
- return 0;
-}
-
-static int get_invariant_cp15(u64 id, void __user *uaddr)
-{
- struct coproc_params params;
- const struct coproc_reg *r;
-
- if (!index_to_params(id, &params))
- return -ENOENT;
-
- r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
- if (!r)
- return -ENOENT;
-
- return reg_to_user(uaddr, &r->val, id);
-}
-
-static int set_invariant_cp15(u64 id, void __user *uaddr)
-{
- struct coproc_params params;
- const struct coproc_reg *r;
- int err;
- u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */
-
- if (!index_to_params(id, &params))
- return -ENOENT;
- r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15));
- if (!r)
- return -ENOENT;
-
- err = reg_from_user(&val, uaddr, id);
- if (err)
- return err;
-
- /* This is what we mean by invariant: you can't change it. */
- if (r->val != val)
- return -EINVAL;
-
- return 0;
-}
-
-static bool is_valid_cache(u32 val)
-{
- u32 level, ctype;
-
- if (val >= CSSELR_MAX)
- return -ENOENT;
-
- /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
- level = (val >> 1);
- ctype = (cache_levels >> (level * 3)) & 7;
-
- switch (ctype) {
- case 0: /* No cache */
- return false;
- case 1: /* Instruction cache only */
- return (val & 1);
- case 2: /* Data cache only */
- case 4: /* Unified cache */
- return !(val & 1);
- case 3: /* Separate instruction and data caches */
- return true;
- default: /* Reserved: we can't know instruction or data. */
- return false;
- }
-}
-
-/* Which cache CCSIDR represents depends on CSSELR value. */
-static u32 get_ccsidr(u32 csselr)
-{
- u32 ccsidr;
-
- /* Make sure noone else changes CSSELR during this! */
- local_irq_disable();
- /* Put value into CSSELR */
- asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr));
- isb();
- /* Read result out of CCSIDR */
- asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr));
- local_irq_enable();
-
- return ccsidr;
-}
-
-static int demux_c15_get(u64 id, void __user *uaddr)
-{
- u32 val;
- u32 __user *uval = uaddr;
-
- /* Fail if we have unknown bits set. */
- if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
- | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
- return -ENOENT;
-
- switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
- case KVM_REG_ARM_DEMUX_ID_CCSIDR:
- if (KVM_REG_SIZE(id) != 4)
- return -ENOENT;
- val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
- >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
- if (!is_valid_cache(val))
- return -ENOENT;
-
- return put_user(get_ccsidr(val), uval);
- default:
- return -ENOENT;
- }
-}
-
-static int demux_c15_set(u64 id, void __user *uaddr)
-{
- u32 val, newval;
- u32 __user *uval = uaddr;
-
- /* Fail if we have unknown bits set. */
- if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
- | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
- return -ENOENT;
-
- switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
- case KVM_REG_ARM_DEMUX_ID_CCSIDR:
- if (KVM_REG_SIZE(id) != 4)
- return -ENOENT;
- val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
- >> KVM_REG_ARM_DEMUX_VAL_SHIFT;
- if (!is_valid_cache(val))
- return -ENOENT;
-
- if (get_user(newval, uval))
- return -EFAULT;
-
- /* This is also invariant: you can't change it. 
*/ - if (newval != get_ccsidr(val)) - return -EINVAL; - return 0; - default: - return -ENOENT; - } -} - -#ifdef CONFIG_VFPv3 -static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC, - KVM_REG_ARM_VFP_FPSCR, - KVM_REG_ARM_VFP_FPINST, - KVM_REG_ARM_VFP_FPINST2, - KVM_REG_ARM_VFP_MVFR0, - KVM_REG_ARM_VFP_MVFR1, - KVM_REG_ARM_VFP_FPSID }; - -static unsigned int num_fp_regs(void) -{ - if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2) - return 32; - else - return 16; -} - -static unsigned int num_vfp_regs(void) -{ - /* Normal FP regs + control regs. */ - return num_fp_regs() + ARRAY_SIZE(vfp_sysregs); -} - -static int copy_vfp_regids(u64 __user *uindices) -{ - unsigned int i; - const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP; - const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP; - - for (i = 0; i < num_fp_regs(); i++) { - if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i, - uindices)) - return -EFAULT; - uindices++; - } - - for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) { - if (put_user(u32reg | vfp_sysregs[i], uindices)) - return -EFAULT; - uindices++; - } - - return num_vfp_regs(); -} - -static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) -{ - u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); - u32 val; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - if (vfpid < num_fp_regs()) { - if (KVM_REG_SIZE(id) != 8) - return -ENOENT; - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], - id); - } - - /* FP control registers are all 32 bit. */ - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - - switch (vfpid) { - case KVM_REG_ARM_VFP_FPEXC: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); - case KVM_REG_ARM_VFP_FPSCR: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); - case KVM_REG_ARM_VFP_FPINST: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); - case KVM_REG_ARM_VFP_FPINST2: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); - case KVM_REG_ARM_VFP_MVFR0: - val = fmrx(MVFR0); - return reg_to_user(uaddr, &val, id); - case KVM_REG_ARM_VFP_MVFR1: - val = fmrx(MVFR1); - return reg_to_user(uaddr, &val, id); - case KVM_REG_ARM_VFP_FPSID: - val = fmrx(FPSID); - return reg_to_user(uaddr, &val, id); - default: - return -ENOENT; - } -} - -static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) -{ - u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); - u32 val; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - if (vfpid < num_fp_regs()) { - if (KVM_REG_SIZE(id) != 8) - return -ENOENT; - return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], - uaddr, id); - } - - /* FP control registers are all 32 bit. */ - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - - switch (vfpid) { - case KVM_REG_ARM_VFP_FPEXC: - return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); - case KVM_REG_ARM_VFP_FPSCR: - return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); - case KVM_REG_ARM_VFP_FPINST: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); - case KVM_REG_ARM_VFP_FPINST2: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); - /* These are invariant. 
*/ - case KVM_REG_ARM_VFP_MVFR0: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(MVFR0)) - return -EINVAL; - return 0; - case KVM_REG_ARM_VFP_MVFR1: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(MVFR1)) - return -EINVAL; - return 0; - case KVM_REG_ARM_VFP_FPSID: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(FPSID)) - return -EINVAL; - return 0; - default: - return -ENOENT; - } -} -#else /* !CONFIG_VFPv3 */ -static unsigned int num_vfp_regs(void) -{ - return 0; -} - -static int copy_vfp_regids(u64 __user *uindices) -{ - return 0; -} - -static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) -{ - return -ENOENT; -} - -static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) -{ - return -ENOENT; -} -#endif /* !CONFIG_VFPv3 */ - -int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - const struct coproc_reg *r; - void __user *uaddr = (void __user *)(long)reg->addr; - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) - return demux_c15_get(reg->id, uaddr); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) - return vfp_get_reg(vcpu, reg->id, uaddr); - - r = index_to_coproc_reg(vcpu, reg->id); - if (!r) - return get_invariant_cp15(reg->id, uaddr); - - /* Note: copies two regs if size is 64 bit. */ - return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); -} - -int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - const struct coproc_reg *r; - void __user *uaddr = (void __user *)(long)reg->addr; - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) - return demux_c15_set(reg->id, uaddr); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) - return vfp_set_reg(vcpu, reg->id, uaddr); - - r = index_to_coproc_reg(vcpu, reg->id); - if (!r) - return set_invariant_cp15(reg->id, uaddr); - - /* Note: copies two regs if size is 64 bit */ - return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); -} - -static unsigned int num_demux_regs(void) -{ - unsigned int i, count = 0; - - for (i = 0; i < CSSELR_MAX; i++) - if (is_valid_cache(i)) - count++; - - return count; -} - -static int write_demux_regids(u64 __user *uindices) -{ - u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; - unsigned int i; - - val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; - for (i = 0; i < CSSELR_MAX; i++) { - if (!is_valid_cache(i)) - continue; - if (put_user(val | i, uindices)) - return -EFAULT; - uindices++; - } - return 0; -} - -static u64 cp15_to_index(const struct coproc_reg *reg) -{ - u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); - if (reg->is_64) { - val |= KVM_REG_SIZE_U64; - val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); - } else { - val |= KVM_REG_SIZE_U32; - val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); - val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); - } - return val; -} - -static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) -{ - if (!*uind) - return true; - - if (put_user(cp15_to_index(reg), *uind)) - return false; - - (*uind)++; - return true; -} - -/* Assumed ordered tables, see kvm_coproc_table_init. 
*/ -static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) -{ - const struct coproc_reg *i1, *i2, *end1, *end2; - unsigned int total = 0; - size_t num; - - /* We check for duplicates here, to allow arch-specific overrides. */ - i1 = get_target_table(vcpu->arch.target, &num); - end1 = i1 + num; - i2 = cp15_regs; - end2 = cp15_regs + ARRAY_SIZE(cp15_regs); - - BUG_ON(i1 == end1 || i2 == end2); - - /* Walk carefully, as both tables may refer to the same register. */ - while (i1 || i2) { - int cmp = cmp_reg(i1, i2); - /* target-specific overrides generic entry. */ - if (cmp <= 0) { - /* Ignore registers we trap but don't save. */ - if (i1->reg) { - if (!copy_reg_to_user(i1, &uind)) - return -EFAULT; - total++; - } - } else { - /* Ignore registers we trap but don't save. */ - if (i2->reg) { - if (!copy_reg_to_user(i2, &uind)) - return -EFAULT; - total++; - } - } - - if (cmp <= 0 && ++i1 == end1) - i1 = NULL; - if (cmp >= 0 && ++i2 == end2) - i2 = NULL; - } - return total; -} - -unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) -{ - return ARRAY_SIZE(invariant_cp15) - + num_demux_regs() - + num_vfp_regs() - + walk_cp15(vcpu, (u64 __user *)NULL); -} - -int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - unsigned int i; - int err; - - /* Then give them all the invariant registers' indices. */ - for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { - if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) - return -EFAULT; - uindices++; - } - - err = walk_cp15(vcpu, uindices); - if (err < 0) - return err; - uindices += err; - - err = copy_vfp_regids(uindices); - if (err < 0) - return err; - uindices += err; - - return write_demux_regids(uindices); -} - -void kvm_coproc_table_init(void) -{ - unsigned int i; - - /* Make sure tables are unique and in order. */ - for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) - BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); - - /* We abuse the reset function to overwrite the table itself. */ - for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) - invariant_cp15[i].reset(NULL, &invariant_cp15[i]); - - /* - * CLIDR format is awkward, so clean it up. See ARM B4.1.20: - * - * If software reads the Cache Type fields from Ctype1 - * upwards, once it has seen a value of 0b000, no caches - * exist at further-out levels of the hierarchy. So, for - * example, if Ctype3 is the first Cache Type field with a - * value of 0b000, the values of Ctype4 to Ctype7 must be - * ignored. - */ - asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); - for (i = 0; i < 7; i++) - if (((cache_levels >> (i*3)) & 7) == 0) - break; - /* Clear all higher bits. */ - cache_levels &= (1 << (i*3))-1; -} - -/** - * kvm_reset_coprocs - sets cp15 registers to reset value - * @vcpu: The VCPU pointer - * - * This function finds the right table above and sets the registers on the - * virtual CPU struct to their architecturally defined reset values. - */ -void kvm_reset_coprocs(struct kvm_vcpu *vcpu) -{ - size_t num; - const struct coproc_reg *table; - - /* Catch someone adding a register without putting in reset entry. */ - memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); - - /* Generic chip reset first (so target could override). 
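
The walk_cp15() and kvm_arm_copy_coproc_indices() helpers above feed the KVM_GET_REG_LIST ioctl (via kvm_arm_copy_reg_indices() in guest.c further down). A minimal userspace sketch of consuming that list could look like this (illustrative only: the helper name is invented, the vcpu file descriptor is assumed to exist, and error handling is omitted):

    #include <stdlib.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static void list_vcpu_regs(int vcpu_fd)
    {
            struct kvm_reg_list probe = { .n = 0 };
            struct kvm_reg_list *list;

            /* First call fails with E2BIG but reports the index count. */
            ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

            list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
            list->n = probe.n;
            ioctl(vcpu_fd, KVM_GET_REG_LIST, list);

            /* list->reg[] now holds core, cp15, demux and VFP register ids. */
            free(list);
    }
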
*/ - reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); - - table = get_target_table(vcpu->arch.target, &num); - reset_coproc_regs(vcpu, table, num); - - for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu->arch.cp15[num] == 0x42424242) - panic("Didn't reset vcpu->arch.cp15[%zi]", num); -} diff --git a/trunk/arch/arm/kvm/coproc.h b/trunk/arch/arm/kvm/coproc.h deleted file mode 100644 index 992adfafa2ff..000000000000 --- a/trunk/arch/arm/kvm/coproc.h +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_COPROC_LOCAL_H__ -#define __ARM_KVM_COPROC_LOCAL_H__ - -struct coproc_params { - unsigned long CRn; - unsigned long CRm; - unsigned long Op1; - unsigned long Op2; - unsigned long Rt1; - unsigned long Rt2; - bool is_64bit; - bool is_write; -}; - -struct coproc_reg { - /* MRC/MCR/MRRC/MCRR instruction which accesses it. */ - unsigned long CRn; - unsigned long CRm; - unsigned long Op1; - unsigned long Op2; - - bool is_64; - - /* Trapped access from guest, if non-NULL. */ - bool (*access)(struct kvm_vcpu *, - const struct coproc_params *, - const struct coproc_reg *); - - /* Initialization for vcpu. */ - void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); - - /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ - unsigned long reg; - - /* Value (usually reset value) */ - u64 val; -}; - -static inline void print_cp_instr(const struct coproc_params *p) -{ - /* Look, we even formatted it for you to paste into the table! */ - if (p->is_64bit) { - kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n", - p->CRm, p->Op1, p->is_write ? "write" : "read"); - } else { - kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," - " func_%s },\n", - p->CRn, p->CRm, p->Op1, p->Op2, - p->is_write ? 
"write" : "read"); - } -} - -static inline bool ignore_write(struct kvm_vcpu *vcpu, - const struct coproc_params *p) -{ - return true; -} - -static inline bool read_zero(struct kvm_vcpu *vcpu, - const struct coproc_params *p) -{ - *vcpu_reg(vcpu, p->Rt1) = 0; - return true; -} - -static inline bool write_to_read_only(struct kvm_vcpu *vcpu, - const struct coproc_params *params) -{ - kvm_debug("CP15 write to read-only register at: %08x\n", - *vcpu_pc(vcpu)); - print_cp_instr(params); - return false; -} - -static inline bool read_from_write_only(struct kvm_vcpu *vcpu, - const struct coproc_params *params) -{ - kvm_debug("CP15 read to write-only register at: %08x\n", - *vcpu_pc(vcpu)); - print_cp_instr(params); - return false; -} - -/* Reset functions */ -static inline void reset_unknown(struct kvm_vcpu *vcpu, - const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = 0xdecafbad; -} - -static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = r->val; -} - -static inline void reset_unknown64(struct kvm_vcpu *vcpu, - const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); - - vcpu->arch.cp15[r->reg] = 0xdecafbad; - vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; -} - -static inline int cmp_reg(const struct coproc_reg *i1, - const struct coproc_reg *i2) -{ - BUG_ON(i1 == i2); - if (!i1) - return 1; - else if (!i2) - return -1; - if (i1->CRn != i2->CRn) - return i1->CRn - i2->CRn; - if (i1->CRm != i2->CRm) - return i1->CRm - i2->CRm; - if (i1->Op1 != i2->Op1) - return i1->Op1 - i2->Op1; - return i1->Op2 - i2->Op2; -} - - -#define CRn(_x) .CRn = _x -#define CRm(_x) .CRm = _x -#define Op1(_x) .Op1 = _x -#define Op2(_x) .Op2 = _x -#define is64 .is_64 = true -#define is32 .is_64 = false - -#endif /* __ARM_KVM_COPROC_LOCAL_H__ */ diff --git a/trunk/arch/arm/kvm/coproc_a15.c b/trunk/arch/arm/kvm/coproc_a15.c deleted file mode 100644 index 685063a6d0cf..000000000000 --- a/trunk/arch/arm/kvm/coproc_a15.c +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Rusty Russell - * Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ -#include -#include -#include -#include -#include -#include -#include - -static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - /* - * Compute guest MPIDR: - * (Even if we present only one VCPU to the guest on an SMP - * host we don't set the U bit in the MPIDR, or vice versa, as - * revealing the underlying hardware properties is likely to - * be the best choice). 
- */ - vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK) - | (vcpu->vcpu_id & MPIDR_LEVEL_MASK); -} - -#include "coproc.h" - -/* A15 TRM 4.3.28: RO WI */ -static bool access_actlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; - return true; -} - -/* A15 TRM 4.3.60: R/O. */ -static bool access_cbar(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return write_to_read_only(vcpu, p); - return read_zero(vcpu, p); -} - -/* A15 TRM 4.3.48: R/O WI. */ -static bool access_l2ctlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; - return true; -} - -static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - u32 l2ctlr, ncores; - - asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); - l2ctlr &= ~(3 << 24); - ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; - l2ctlr |= (ncores & 3) << 24; - - vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; -} - -static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - u32 actlr; - - /* ACTLR contains SMP bit: make sure you create all cpus first! */ - asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); - /* Make the SMP bit consistent with the guest configuration */ - if (atomic_read(&vcpu->kvm->online_vcpus) > 1) - actlr |= 1U << 6; - else - actlr &= ~(1U << 6); - - vcpu->arch.cp15[c1_ACTLR] = actlr; -} - -/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */ -static bool access_l2ectlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = 0; - return true; -} - -/* - * A15-specific CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 - */ -static const struct coproc_reg a15_regs[] = { - /* MPIDR: we use VMPIDR for guest access. */ - { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, - NULL, reset_mpidr, c0_MPIDR }, - - /* SCTLR: swapped by interrupt.S. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_val, c1_SCTLR, 0x00C50078 }, - /* ACTLR: trapped by HCR.TAC bit. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, - access_actlr, reset_actlr, c1_ACTLR }, - /* CPACR: swapped by interrupt.S. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_val, c1_CPACR, 0x00000000 }, - - /* - * L2CTLR access (guest wants to know #CPUs). - */ - { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, - access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, - { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, - - /* The Configuration Base Address Register. 
*/ - { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, -}; - -static struct kvm_coproc_target_table a15_target_table = { - .target = KVM_ARM_TARGET_CORTEX_A15, - .table = a15_regs, - .num = ARRAY_SIZE(a15_regs), -}; - -static int __init coproc_a15_init(void) -{ - unsigned int i; - - for (i = 1; i < ARRAY_SIZE(a15_regs); i++) - BUG_ON(cmp_reg(&a15_regs[i-1], - &a15_regs[i]) >= 0); - - kvm_register_target_coproc_table(&a15_target_table); - return 0; -} -late_initcall(coproc_a15_init); diff --git a/trunk/arch/arm/kvm/emulate.c b/trunk/arch/arm/kvm/emulate.c deleted file mode 100644 index d61450ac6665..000000000000 --- a/trunk/arch/arm/kvm/emulate.c +++ /dev/null @@ -1,373 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include - -#include "trace.h" - -#define VCPU_NR_MODES 6 -#define VCPU_REG_OFFSET_USR 0 -#define VCPU_REG_OFFSET_FIQ 1 -#define VCPU_REG_OFFSET_IRQ 2 -#define VCPU_REG_OFFSET_SVC 3 -#define VCPU_REG_OFFSET_ABT 4 -#define VCPU_REG_OFFSET_UND 5 -#define REG_OFFSET(_reg) \ - (offsetof(struct kvm_regs, _reg) / sizeof(u32)) - -#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num]) - -static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { - /* USR/SYS Registers */ - [VCPU_REG_OFFSET_USR] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14), - }, - - /* FIQ Registers */ - [VCPU_REG_OFFSET_FIQ] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), - REG_OFFSET(fiq_regs[0]), /* r8 */ - REG_OFFSET(fiq_regs[1]), /* r9 */ - REG_OFFSET(fiq_regs[2]), /* r10 */ - REG_OFFSET(fiq_regs[3]), /* r11 */ - REG_OFFSET(fiq_regs[4]), /* r12 */ - REG_OFFSET(fiq_regs[5]), /* r13 */ - REG_OFFSET(fiq_regs[6]), /* r14 */ - }, - - /* IRQ Registers */ - [VCPU_REG_OFFSET_IRQ] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(irq_regs[0]), /* r13 */ - REG_OFFSET(irq_regs[1]), /* r14 */ - }, - - /* SVC Registers */ - [VCPU_REG_OFFSET_SVC] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(svc_regs[0]), /* r13 */ - 
REG_OFFSET(svc_regs[1]), /* r14 */ - }, - - /* ABT Registers */ - [VCPU_REG_OFFSET_ABT] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(abt_regs[0]), /* r13 */ - REG_OFFSET(abt_regs[1]), /* r14 */ - }, - - /* UND Registers */ - [VCPU_REG_OFFSET_UND] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(und_regs[0]), /* r13 */ - REG_OFFSET(und_regs[1]), /* r14 */ - }, -}; - -/* - * Return a pointer to the register number valid in the current mode of - * the virtual CPU. - */ -u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) -{ - u32 *reg_array = (u32 *)&vcpu->arch.regs; - u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; - - switch (mode) { - case USR_MODE...SVC_MODE: - mode &= ~MODE32_BIT; /* 0 ... 3 */ - break; - - case ABT_MODE: - mode = VCPU_REG_OFFSET_ABT; - break; - - case UND_MODE: - mode = VCPU_REG_OFFSET_UND; - break; - - case SYSTEM_MODE: - mode = VCPU_REG_OFFSET_USR; - break; - - default: - BUG(); - } - - return reg_array + vcpu_reg_offsets[mode][reg_num]; -} - -/* - * Return the SPSR for the current mode of the virtual CPU. - */ -u32 *vcpu_spsr(struct kvm_vcpu *vcpu) -{ - u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; - switch (mode) { - case SVC_MODE: - return &vcpu->arch.regs.KVM_ARM_SVC_spsr; - case ABT_MODE: - return &vcpu->arch.regs.KVM_ARM_ABT_spsr; - case UND_MODE: - return &vcpu->arch.regs.KVM_ARM_UND_spsr; - case IRQ_MODE: - return &vcpu->arch.regs.KVM_ARM_IRQ_spsr; - case FIQ_MODE: - return &vcpu->arch.regs.KVM_ARM_FIQ_spsr; - default: - BUG(); - } -} - -/** - * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest - * @vcpu: the vcpu pointer - * @run: the kvm_run structure pointer - * - * Simply sets the wait_for_interrupts flag on the vcpu structure, which will - * halt execution of world-switches and schedule other host processes until - * there is an incoming IRQ or FIQ to the VM. - */ -int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - trace_kvm_wfi(*vcpu_pc(vcpu)); - kvm_vcpu_block(vcpu); - return 1; -} - -/** - * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block - * @vcpu: The VCPU pointer - * - * When exceptions occur while instructions are executed in Thumb IF-THEN - * blocks, the ITSTATE field of the CPSR is not advanved (updated), so we have - * to do this little bit of work manually. 
The fields map like this: - * - * IT[7:0] -> CPSR[26:25],CPSR[15:10] - */ -static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) -{ - unsigned long itbits, cond; - unsigned long cpsr = *vcpu_cpsr(vcpu); - bool is_arm = !(cpsr & PSR_T_BIT); - - BUG_ON(is_arm && (cpsr & PSR_IT_MASK)); - - if (!(cpsr & PSR_IT_MASK)) - return; - - cond = (cpsr & 0xe000) >> 13; - itbits = (cpsr & 0x1c00) >> (10 - 2); - itbits |= (cpsr & (0x3 << 25)) >> 25; - - /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */ - if ((itbits & 0x7) == 0) - itbits = cond = 0; - else - itbits = (itbits << 1) & 0x1f; - - cpsr &= ~PSR_IT_MASK; - cpsr |= cond << 13; - cpsr |= (itbits & 0x1c) << (10 - 2); - cpsr |= (itbits & 0x3) << 25; - *vcpu_cpsr(vcpu) = cpsr; -} - -/** - * kvm_skip_instr - skip a trapped instruction and proceed to the next - * @vcpu: The vcpu pointer - */ -void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) -{ - bool is_thumb; - - is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT); - if (is_thumb && !is_wide_instr) - *vcpu_pc(vcpu) += 2; - else - *vcpu_pc(vcpu) += 4; - kvm_adjust_itstate(vcpu); -} - - -/****************************************************************************** - * Inject exceptions into the guest - */ - -static u32 exc_vector_base(struct kvm_vcpu *vcpu) -{ - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - u32 vbar = vcpu->arch.cp15[c12_VBAR]; - - if (sctlr & SCTLR_V) - return 0xffff0000; - else /* always have security exceptions */ - return vbar; -} - -/** - * kvm_inject_undefined - inject an undefined exception into the guest - * @vcpu: The VCPU to receive the undefined exception - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. - * - * Modelled after TakeUndefInstrException() pseudocode. - */ -void kvm_inject_undefined(struct kvm_vcpu *vcpu) -{ - u32 new_lr_value; - u32 new_spsr_value; - u32 cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - bool is_thumb = (cpsr & PSR_T_BIT); - u32 vect_offset = 4; - u32 return_offset = (is_thumb) ? 2 : 4; - - new_spsr_value = cpsr; - new_lr_value = *vcpu_pc(vcpu) - return_offset; - - *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE; - *vcpu_cpsr(vcpu) |= PSR_I_BIT; - *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); - - if (sctlr & SCTLR_TE) - *vcpu_cpsr(vcpu) |= PSR_T_BIT; - if (sctlr & SCTLR_EE) - *vcpu_cpsr(vcpu) |= PSR_E_BIT; - - /* Note: These now point to UND banked copies */ - *vcpu_spsr(vcpu) = cpsr; - *vcpu_reg(vcpu, 14) = new_lr_value; - - /* Branch to exception vector */ - *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; -} - -/* - * Modelled after TakeDataAbortException() and TakePrefetchAbortException - * pseudocode. - */ -static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) -{ - u32 new_lr_value; - u32 new_spsr_value; - u32 cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - bool is_thumb = (cpsr & PSR_T_BIT); - u32 vect_offset; - u32 return_offset = (is_thumb) ? 
4 : 0; - bool is_lpae; - - new_spsr_value = cpsr; - new_lr_value = *vcpu_pc(vcpu) + return_offset; - - *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE; - *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT; - *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); - - if (sctlr & SCTLR_TE) - *vcpu_cpsr(vcpu) |= PSR_T_BIT; - if (sctlr & SCTLR_EE) - *vcpu_cpsr(vcpu) |= PSR_E_BIT; - - /* Note: These now point to ABT banked copies */ - *vcpu_spsr(vcpu) = cpsr; - *vcpu_reg(vcpu, 14) = new_lr_value; - - if (is_pabt) - vect_offset = 12; - else - vect_offset = 16; - - /* Branch to exception vector */ - *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; - - if (is_pabt) { - /* Set DFAR and DFSR */ - vcpu->arch.cp15[c6_IFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); - /* Always give debug fault for now - should give guest a clue */ - if (is_lpae) - vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; - else - vcpu->arch.cp15[c5_IFSR] = 2; - } else { /* !iabt */ - /* Set DFAR and DFSR */ - vcpu->arch.cp15[c6_DFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); - /* Always give debug fault for now - should give guest a clue */ - if (is_lpae) - vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; - else - vcpu->arch.cp15[c5_DFSR] = 2; - } - -} - -/** - * kvm_inject_dabt - inject a data abort into the guest - * @vcpu: The VCPU to receive the undefined exception - * @addr: The address to report in the DFAR - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. - */ -void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) -{ - inject_abt(vcpu, false, addr); -} - -/** - * kvm_inject_pabt - inject a prefetch abort into the guest - * @vcpu: The VCPU to receive the undefined exception - * @addr: The address to report in the DFAR - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. - */ -void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) -{ - inject_abt(vcpu, true, addr); -} diff --git a/trunk/arch/arm/kvm/guest.c b/trunk/arch/arm/kvm/guest.c deleted file mode 100644 index 2339d9609d36..000000000000 --- a/trunk/arch/arm/kvm/guest.c +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } -#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } - -struct kvm_stats_debugfs_item debugfs_entries[] = { - { NULL } -}; - -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - -static u64 core_reg_offset_from_id(u64 id) -{ - return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); -} - -static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; - u64 off; - - if (KVM_REG_SIZE(reg->id) != 4) - return -ENOENT; - - /* Our ID is an index into the kvm_regs struct. */ - off = core_reg_offset_from_id(reg->id); - if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) - return -ENOENT; - - return put_user(((u32 *)regs)[off], uaddr); -} - -static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; - u64 off, val; - - if (KVM_REG_SIZE(reg->id) != 4) - return -ENOENT; - - /* Our ID is an index into the kvm_regs struct. */ - off = core_reg_offset_from_id(reg->id); - if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) - return -ENOENT; - - if (get_user(val, uaddr) != 0) - return -EFAULT; - - if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) { - unsigned long mode = val & MODE_MASK; - switch (mode) { - case USR_MODE: - case FIQ_MODE: - case IRQ_MODE: - case SVC_MODE: - case ABT_MODE: - case UND_MODE: - break; - default: - return -EINVAL; - } - } - - ((u32 *)regs)[off] = val; - return 0; -} - -int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - return -EINVAL; -} - -static unsigned long num_core_regs(void) -{ - return sizeof(struct kvm_regs) / sizeof(u32); -} - -/** - * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG - * - * This is for all registers. - */ -unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) -{ - return num_core_regs() + kvm_arm_num_coproc_regs(vcpu); -} - -/** - * kvm_arm_copy_reg_indices - get indices of all registers. - * - * We do core registers right here, then we apppend coproc regs. - */ -int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - unsigned int i; - const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE; - - for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) { - if (put_user(core_reg | i, uindices)) - return -EFAULT; - uindices++; - } - - return kvm_arm_copy_coproc_indices(vcpu, uindices); -} - -int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - /* We currently use nothing arch-specific in upper 32 bits */ - if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) - return -EINVAL; - - /* Register group 16 means we want a core register. */ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return get_core_reg(vcpu, reg); - - return kvm_arm_coproc_get_reg(vcpu, reg); -} - -int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - /* We currently use nothing arch-specific in upper 32 bits */ - if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) - return -EINVAL; - - /* Register group 16 means we set a core register. 
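
Since get_core_reg()/set_core_reg() above treat the register id as an index into struct kvm_regs, a userspace read of, say, the guest CPSR through the same one-reg interface could look roughly like the sketch below (built against the ARM uapi headers; the helper name is made up and error handling is omitted):

    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static __u32 read_guest_cpsr(int vcpu_fd)
    {
            __u32 cpsr = 0;
            struct kvm_one_reg reg = {
                    /* Same id layout that get_core_reg() decodes above. */
                    .id   = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE |
                            KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr),
                    .addr = (__u64)(unsigned long)&cpsr,
            };

            ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
            return cpsr;
    }
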
*/ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return set_core_reg(vcpu, reg); - - return kvm_arm_coproc_set_reg(vcpu, reg); -} - -int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; -} - -int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, - const struct kvm_vcpu_init *init) -{ - unsigned int i; - - /* We can only do a cortex A15 for now. */ - if (init->target != kvm_target_cpu()) - return -EINVAL; - - vcpu->arch.target = init->target; - bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); - - /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ - for (i = 0; i < sizeof(init->features) * 8; i++) { - if (test_bit(i, (void *)init->features)) { - if (i >= KVM_VCPU_MAX_FEATURES) - return -ENOENT; - set_bit(i, vcpu->arch.features); - } - } - - /* Now we know what it is, we can reset it. */ - return kvm_reset_vcpu(vcpu); -} - -int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, - struct kvm_translation *tr) -{ - return -EINVAL; -} diff --git a/trunk/arch/arm/kvm/init.S b/trunk/arch/arm/kvm/init.S deleted file mode 100644 index 9f37a79b880b..000000000000 --- a/trunk/arch/arm/kvm/init.S +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include - -/******************************************************************** - * Hypervisor initialization - * - should be called with: - * r0,r1 = Hypervisor pgd pointer - * r2 = top of Hyp stack (kernel VA) - * r3 = pointer to hyp vectors - */ - - .text - .pushsection .hyp.idmap.text,"ax" - .align 5 -__kvm_hyp_init: - .globl __kvm_hyp_init - - @ Hyp-mode exception vector - W(b) . - W(b) . - W(b) . - W(b) . - W(b) . - W(b) __do_hyp_init - W(b) . - W(b) . - -__do_hyp_init: - @ Set the HTTBR to point to the hypervisor PGD pointer passed - mcrr p15, 4, r0, r1, c2 - - @ Set the HTCR and VTCR to the same shareability and cacheability - @ settings as the non-secure TTBCR and with T0SZ == 0. 
- mrc p15, 4, r0, c2, c0, 2 @ HTCR - ldr r12, =HTCR_MASK - bic r0, r0, r12 - mrc p15, 0, r1, c2, c0, 2 @ TTBCR - and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ) - orr r0, r0, r1 - mcr p15, 4, r0, c2, c0, 2 @ HTCR - - mrc p15, 4, r1, c2, c1, 2 @ VTCR - ldr r12, =VTCR_MASK - bic r1, r1, r12 - bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits - orr r1, r0, r1 - orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S) - mcr p15, 4, r1, c2, c1, 2 @ VTCR - - @ Use the same memory attributes for hyp. accesses as the kernel - @ (copy MAIRx ro HMAIRx). - mrc p15, 0, r0, c10, c2, 0 - mcr p15, 4, r0, c10, c2, 0 - mrc p15, 0, r0, c10, c2, 1 - mcr p15, 4, r0, c10, c2, 1 - - @ Set the HSCTLR to: - @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) - @ - Endianness: Kernel config - @ - Fast Interrupt Features: Kernel config - @ - Write permission implies XN: disabled - @ - Instruction cache: enabled - @ - Data/Unified cache: enabled - @ - Memory alignment checks: enabled - @ - MMU: enabled (this code must be run from an identity mapping) - mrc p15, 4, r0, c1, c0, 0 @ HSCR - ldr r12, =HSCTLR_MASK - bic r0, r0, r12 - mrc p15, 0, r1, c1, c0, 0 @ SCTLR - ldr r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) - and r1, r1, r12 - ARM( ldr r12, =(HSCTLR_M | HSCTLR_A) ) - THUMB( ldr r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) - orr r1, r1, r12 - orr r0, r0, r1 - isb - mcr p15, 4, r0, c1, c0, 0 @ HSCR - isb - - @ Set stack pointer and return to the kernel - mov sp, r2 - - @ Set HVBAR to point to the HYP vectors - mcr p15, 4, r3, c12, c0, 0 @ HVBAR - - eret - - .ltorg - - .globl __kvm_hyp_init_end -__kvm_hyp_init_end: - - .popsection diff --git a/trunk/arch/arm/kvm/interrupts.S b/trunk/arch/arm/kvm/interrupts.S deleted file mode 100644 index c5400d2e97ca..000000000000 --- a/trunk/arch/arm/kvm/interrupts.S +++ /dev/null @@ -1,478 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "interrupts_head.S" - - .text - -__kvm_hyp_code_start: - .globl __kvm_hyp_code_start - -/******************************************************************** - * Flush per-VMID TLBs - * - * void __kvm_tlb_flush_vmid(struct kvm *kvm); - * - * We rely on the hardware to broadcast the TLB invalidation to all CPUs - * inside the inner-shareable domain (which is the case for all v7 - * implementations). If we come across a non-IS SMP implementation, we'll - * have to use an IPI based mechanism. Until then, we stick to the simple - * hardware assisted version. 
- */ -ENTRY(__kvm_tlb_flush_vmid) - push {r2, r3} - - add r0, r0, #KVM_VTTBR - ldrd r2, r3, [r0] - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - isb - mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) - dsb - isb - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Back to VMID #0 - isb @ Not necessary if followed by eret - - pop {r2, r3} - bx lr -ENDPROC(__kvm_tlb_flush_vmid) - -/******************************************************************** - * Flush TLBs and instruction caches of all CPUs inside the inner-shareable - * domain, for all VMIDs - * - * void __kvm_flush_vm_context(void); - */ -ENTRY(__kvm_flush_vm_context) - mov r0, #0 @ rn parameter for c15 flushes is SBZ - - /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */ - mcr p15, 4, r0, c8, c3, 4 - /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ - mcr p15, 0, r0, c7, c1, 0 - dsb - isb @ Not necessary if followed by eret - - bx lr -ENDPROC(__kvm_flush_vm_context) - - -/******************************************************************** - * Hypervisor world-switch code - * - * - * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) - */ -ENTRY(__kvm_vcpu_run) - @ Save the vcpu pointer - mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR - - save_host_regs - - @ Store hardware CP15 state and load guest state - read_cp15_state store_to_vcpu = 0 - write_cp15_state read_from_vcpu = 1 - - @ If the host kernel has not been configured with VFPv3 support, - @ then it is safer if we deny guests from using it as well. -#ifdef CONFIG_VFPv3 - @ Set FPEXC_EN so the guest doesn't trap floating point instructions - VFPFMRX r2, FPEXC @ VMRS - push {r2} - orr r2, r2, #FPEXC_EN - VFPFMXR FPEXC, r2 @ VMSR -#endif - - @ Configure Hyp-role - configure_hyp_role vmentry - - @ Trap coprocessor CRx accesses - set_hstr vmentry - set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) - set_hdcr vmentry - - @ Write configured ID register into MIDR alias - ldr r1, [vcpu, #VCPU_MIDR] - mcr p15, 4, r1, c0, c0, 0 - - @ Write guest view of MPIDR into VMPIDR - ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] - mcr p15, 4, r1, c0, c0, 5 - - @ Set up guest memory translation - ldr r1, [vcpu, #VCPU_KVM] - add r1, r1, #KVM_VTTBR - ldrd r2, r3, [r1] - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - - @ We're all done, just restore the GPRs and go to the guest - restore_guest_regs - clrex @ Clear exclusive monitor - eret - -__kvm_vcpu_return: - /* - * return convention: - * guest r0, r1, r2 saved on the stack - * r0: vcpu pointer - * r1: exception code - */ - save_guest_regs - - @ Set VMID == 0 - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - - @ Don't trap coprocessor accesses for host kernel - set_hstr vmexit - set_hdcr vmexit - set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) - -#ifdef CONFIG_VFPv3 - @ Save floating point registers we if let guest use them. 
- tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) - bne after_vfp_restore - - @ Switch VFP/NEON hardware state to the host's - add r7, vcpu, #VCPU_VFP_GUEST - store_vfp_state r7 - add r7, vcpu, #VCPU_VFP_HOST - ldr r7, [r7] - restore_vfp_state r7 - -after_vfp_restore: - @ Restore FPEXC_EN which we clobbered on entry - pop {r2} - VFPFMXR FPEXC, r2 -#endif - - @ Reset Hyp-role - configure_hyp_role vmexit - - @ Let host read hardware MIDR - mrc p15, 0, r2, c0, c0, 0 - mcr p15, 4, r2, c0, c0, 0 - - @ Back to hardware MPIDR - mrc p15, 0, r2, c0, c0, 5 - mcr p15, 4, r2, c0, c0, 5 - - @ Store guest CP15 state and restore host state - read_cp15_state store_to_vcpu = 1 - write_cp15_state read_from_vcpu = 0 - - restore_host_regs - clrex @ Clear exclusive monitor - mov r0, r1 @ Return the return code - mov r1, #0 @ Clear upper bits in return value - bx lr @ return to IOCTL - -/******************************************************************** - * Call function in Hyp mode - * - * - * u64 kvm_call_hyp(void *hypfn, ...); - * - * This is not really a variadic function in the classic C-way and care must - * be taken when calling this to ensure parameters are passed in registers - * only, since the stack will change between the caller and the callee. - * - * Call the function with the first argument containing a pointer to the - * function you wish to call in Hyp mode, and subsequent arguments will be - * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the - * function pointer can be passed). The function being called must be mapped - * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 and r1. - * - * The calling convention follows the standard AAPCS: - * r0 - r3: caller save - * r12: caller save - * rest: callee save - */ -ENTRY(kvm_call_hyp) - hvc #0 - bx lr - -/******************************************************************** - * Hypervisor exception vector and handlers - * - * - * The KVM/ARM Hypervisor ABI is defined as follows: - * - * Entry to Hyp mode from the host kernel will happen _only_ when an HVC - * instruction is issued since all traps are disabled when running the host - * kernel as per the Hyp-mode initialization at boot time. - * - * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc - * below) when the HVC instruction is called from SVC mode (i.e. a guest or the - * host kernel) and they cause a trap to the vector page + offset 0xc when HVC - * instructions are called from within Hyp-mode. - * - * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): - * Switching to Hyp mode is done through a simple HVC #0 instruction. The - * exception vector code will check that the HVC comes from VMID==0 and if - * so will push the necessary state (SPSR, lr_usr) on the Hyp stack. - * - r0 contains a pointer to a HYP function - * - r1, r2, and r3 contain arguments to the above function. - * - The HYP function will be called with its arguments in r0, r1 and r2. - * On HYP function return, we return directly to SVC. - * - * Note that the above is used to execute code in Hyp-mode from a host-kernel - * point of view, and is a different concept from performing a world-switch and - * executing guest code SVC mode (with a VMID != 0). 
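The block comment above is the contract the C side relies on: the pointer to the Hyp-mode function goes out in r0, up to three further arguments follow in r1-r3 and arrive in the callee as r0-r2, and results come back in r0/r1. As a minimal sketch (the wrapper name is made up; the declarations are assumed to come from the KVM/ARM headers), a host-side TLB flush through this path looks like:

  #include <linux/types.h>

  struct kvm;                                       /* opaque here                       */
  extern u64 kvm_call_hyp(void *hypfn, ...);        /* assumed declaration               */
  extern void __kvm_tlb_flush_vmid(struct kvm *);   /* flush routine defined earlier in
                                                     * this file                         */

  static void example_flush_guest_tlb(struct kvm *kvm)
  {
          /* hypfn travels in r0 and kvm in r1; the HVC trampoline shifts
           * the arguments down so the Hyp function sees kvm in r0. */
          kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
  }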
- */ - -/* Handle undef, svc, pabt, or dabt by crashing with a user notice */ -.macro bad_exception exception_code, panic_str - push {r0-r2} - mrrc p15, 6, r0, r1, c2 @ Read VTTBR - lsr r1, r1, #16 - ands r1, r1, #0xff - beq 99f - - load_vcpu @ Load VCPU pointer - .if \exception_code == ARM_EXCEPTION_DATA_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 0 @ HDFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - .if \exception_code == ARM_EXCEPTION_PREF_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 2 @ HIFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - mov r1, #\exception_code - b __kvm_vcpu_return - - @ We were in the host already. Let's craft a panic-ing return to SVC. -99: mrs r2, cpsr - bic r2, r2, #MODE_MASK - orr r2, r2, #SVC_MODE -THUMB( orr r2, r2, #PSR_T_BIT ) - msr spsr_cxsf, r2 - mrs r1, ELR_hyp - ldr r2, =BSYM(panic) - msr ELR_hyp, r2 - ldr r0, =\panic_str - eret -.endm - - .text - - .align 5 -__kvm_hyp_vector: - .globl __kvm_hyp_vector - - @ Hyp-mode exception vector - W(b) hyp_reset - W(b) hyp_undef - W(b) hyp_svc - W(b) hyp_pabt - W(b) hyp_dabt - W(b) hyp_hvc - W(b) hyp_irq - W(b) hyp_fiq - - .align -hyp_reset: - b hyp_reset - - .align -hyp_undef: - bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str - - .align -hyp_svc: - bad_exception ARM_EXCEPTION_HVC, svc_die_str - - .align -hyp_pabt: - bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str - - .align -hyp_dabt: - bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str - - .align -hyp_hvc: - /* - * Getting here is either becuase of a trap from a guest or from calling - * HVC from the host kernel, which means "switch to Hyp mode". - */ - push {r0, r1, r2} - - @ Check syndrome register - mrc p15, 4, r1, c5, c2, 0 @ HSR - lsr r0, r1, #HSR_EC_SHIFT -#ifdef CONFIG_VFPv3 - cmp r0, #HSR_EC_CP_0_13 - beq switch_to_guest_vfp -#endif - cmp r0, #HSR_EC_HVC - bne guest_trap @ Not HVC instr. - - /* - * Let's check if the HVC came from VMID 0 and allow simple - * switch to Hyp mode - */ - mrrc p15, 6, r0, r2, c2 - lsr r2, r2, #16 - and r2, r2, #0xff - cmp r2, #0 - bne guest_trap @ Guest called HVC - -host_switch_to_hyp: - pop {r0, r1, r2} - - push {lr} - mrs lr, SPSR - push {lr} - - mov lr, r0 - mov r0, r1 - mov r1, r2 - mov r2, r3 - -THUMB( orr lr, #1) - blx lr @ Call the HYP function - - pop {lr} - msr SPSR_csxf, lr - pop {lr} - eret - -guest_trap: - load_vcpu @ Load VCPU pointer to r0 - str r1, [vcpu, #VCPU_HSR] - - @ Check if we need the fault information - lsr r1, r1, #HSR_EC_SHIFT - cmp r1, #HSR_EC_IABT - mrceq p15, 4, r2, c6, c0, 2 @ HIFAR - beq 2f - cmp r1, #HSR_EC_DABT - bne 1f - mrc p15, 4, r2, c6, c0, 0 @ HDFAR - -2: str r2, [vcpu, #VCPU_HxFAR] - - /* - * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: - * - * Abort on the stage 2 translation for a memory access from a - * Non-secure PL1 or PL0 mode: - * - * For any Access flag fault or Translation fault, and also for any - * Permission fault on the stage 2 translation of a memory access - * made as part of a translation table walk for a stage 1 translation, - * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR - * is UNKNOWN. 
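When the HPFAR is UNKNOWN, the code that follows resolves the IPA by hand through an ATS1CPR translation and then repacks the PAR result into the HPFAR layout, which keeps PA[39:12] in bits [31:4]. A C rendering of that repacking, offered only as a sketch of what the ubfx/orr sequence computes:

  #include <linux/types.h>

  /* Pack a 40-bit physical address (read back from the 64-bit PAR)
   * into the HPFAR layout: PA[39:12] lands in bits [31:4]. */
  static inline u32 par_to_hpfar(u64 par)
  {
          return (((u32)(par >> 12) & 0xfffff) << 4) |  /* PA[31:12] -> bits [23:4]  */
                 (((u32)(par >> 32) & 0xff) << 24);     /* PA[39:32] -> bits [31:24] */
  }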
- */ - - /* Check for permission fault, and S1PTW */ - mrc p15, 4, r1, c5, c2, 0 @ HSR - and r0, r1, #HSR_FSC_TYPE - cmp r0, #FSC_PERM - tsteq r1, #(1 << 7) @ S1PTW - mrcne p15, 4, r2, c6, c0, 4 @ HPFAR - bne 3f - - /* Resolve IPA using the xFAR */ - mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR - isb - mrrc p15, 0, r0, r1, c7 @ PAR - tst r0, #1 - bne 4f @ Failed translation - ubfx r2, r0, #12, #20 - lsl r2, r2, #4 - orr r2, r2, r1, lsl #24 - -3: load_vcpu @ Load VCPU pointer to r0 - str r2, [r0, #VCPU_HPFAR] - -1: mov r1, #ARM_EXCEPTION_HVC - b __kvm_vcpu_return - -4: pop {r0, r1, r2} @ Failed translation, return to guest - eret - -/* - * If VFPv3 support is not available, then we will not switch the VFP - * registers; however cp10 and cp11 accesses will still trap and fallback - * to the regular coprocessor emulation code, which currently will - * inject an undefined exception to the guest. - */ -#ifdef CONFIG_VFPv3 -switch_to_guest_vfp: - load_vcpu @ Load VCPU pointer to r0 - push {r3-r7} - - @ NEON/VFP used. Turn on VFP access. - set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11)) - - @ Switch VFP/NEON hardware state to the guest's - add r7, r0, #VCPU_VFP_HOST - ldr r7, [r7] - store_vfp_state r7 - add r7, r0, #VCPU_VFP_GUEST - restore_vfp_state r7 - - pop {r3-r7} - pop {r0-r2} - eret -#endif - - .align -hyp_irq: - push {r0, r1, r2} - mov r1, #ARM_EXCEPTION_IRQ - load_vcpu @ Load VCPU pointer to r0 - b __kvm_vcpu_return - - .align -hyp_fiq: - b hyp_fiq - - .ltorg - -__kvm_hyp_code_end: - .globl __kvm_hyp_code_end - - .section ".rodata" - -und_die_str: - .ascii "unexpected undefined exception in Hyp mode at: %#08x" -pabt_die_str: - .ascii "unexpected prefetch abort in Hyp mode at: %#08x" -dabt_die_str: - .ascii "unexpected data abort in Hyp mode at: %#08x" -svc_die_str: - .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x" diff --git a/trunk/arch/arm/kvm/interrupts_head.S b/trunk/arch/arm/kvm/interrupts_head.S deleted file mode 100644 index 6a95d341e9c5..000000000000 --- a/trunk/arch/arm/kvm/interrupts_head.S +++ /dev/null @@ -1,441 +0,0 @@ -#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) -#define VCPU_USR_SP (VCPU_USR_REG(13)) -#define VCPU_USR_LR (VCPU_USR_REG(14)) -#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) - -/* - * Many of these macros need to access the VCPU structure, which is always - * held in r0. These macros should never clobber r1, as it is used to hold the - * exception code on the return path (except of course the macro that switches - * all the registers before the final jump to the VM). - */ -vcpu .req r0 @ vcpu pointer always in r0 - -/* Clobbers {r2-r6} */ -.macro store_vfp_state vfp_base - @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions - VFPFMRX r2, FPEXC - @ Make sure VFP is enabled so we can touch the registers. - orr r6, r2, #FPEXC_EN - VFPFMXR FPEXC, r6 - - VFPFMRX r3, FPSCR - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so - @ we only need to save them if FPEXC_EX is set. 
- VFPFMRX r4, FPINST - tst r2, #FPEXC_FP2V - VFPFMRX r5, FPINST2, ne @ vmrsne - bic r6, r2, #FPEXC_EX @ FPEXC_EX disable - VFPFMXR FPEXC, r6 -1: - VFPFSTMIA \vfp_base, r6 @ Save VFP registers - stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2 -.endm - -/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */ -.macro restore_vfp_state vfp_base - VFPFLDMIA \vfp_base, r6 @ Load VFP registers - ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2 - - VFPFMXR FPSCR, r3 - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - VFPFMXR FPINST, r4 - tst r2, #FPEXC_FP2V - VFPFMXR FPINST2, r5, ne -1: - VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN) -.endm - -/* These are simply for the macros to work - value don't have meaning */ -.equ usr, 0 -.equ svc, 1 -.equ abt, 2 -.equ und, 3 -.equ irq, 4 -.equ fiq, 5 - -.macro push_host_regs_mode mode - mrs r2, SP_\mode - mrs r3, LR_\mode - mrs r4, SPSR_\mode - push {r2, r3, r4} -.endm - -/* - * Store all host persistent registers on the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro save_host_regs - /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */ - mrs r2, ELR_hyp - push {r2} - - /* usr regs */ - push {r4-r12} @ r0-r3 are always clobbered - mrs r2, SP_usr - mov r3, lr - push {r2, r3} - - push_host_regs_mode svc - push_host_regs_mode abt - push_host_regs_mode und - push_host_regs_mode irq - - /* fiq regs */ - mrs r2, r8_fiq - mrs r3, r9_fiq - mrs r4, r10_fiq - mrs r5, r11_fiq - mrs r6, r12_fiq - mrs r7, SP_fiq - mrs r8, LR_fiq - mrs r9, SPSR_fiq - push {r2-r9} -.endm - -.macro pop_host_regs_mode mode - pop {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all host registers from the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro restore_host_regs - pop {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - pop_host_regs_mode irq - pop_host_regs_mode und - pop_host_regs_mode abt - pop_host_regs_mode svc - - pop {r2, r3} - msr SP_usr, r2 - mov lr, r3 - pop {r4-r12} - - pop {r2} - msr ELR_hyp, r2 -.endm - -/* - * Restore SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r1, r2, r3, r4. - */ -.macro restore_guest_regs_mode mode, offset - add r1, vcpu, \offset - ldm r1, {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all guest registers from the vcpu struct. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers *all* registers. - */ -.macro restore_guest_regs - restore_guest_regs_mode svc, #VCPU_SVC_REGS - restore_guest_regs_mode abt, #VCPU_ABT_REGS - restore_guest_regs_mode und, #VCPU_UND_REGS - restore_guest_regs_mode irq, #VCPU_IRQ_REGS - - add r1, vcpu, #VCPU_FIQ_REGS - ldm r1, {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - @ Load return state - ldr r2, [vcpu, #VCPU_PC] - ldr r3, [vcpu, #VCPU_CPSR] - msr ELR_hyp, r2 - msr SPSR_cxsf, r3 - - @ Load user registers - ldr r2, [vcpu, #VCPU_USR_SP] - ldr r3, [vcpu, #VCPU_USR_LR] - msr SP_usr, r2 - mov lr, r3 - add vcpu, vcpu, #(VCPU_USR_REGS) - ldm vcpu, {r0-r12} -.endm - -/* - * Save SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. 
- * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs_mode mode, offset - add r2, vcpu, \offset - mrs r3, SP_\mode - mrs r4, LR_\mode - mrs r5, SPSR_\mode - stm r2, {r3, r4, r5} -.endm - -/* - * Save all guest registers to the vcpu struct - * Expects guest's r0, r1, r2 on the stack. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs - @ Store usr registers - add r2, vcpu, #VCPU_USR_REG(3) - stm r2, {r3-r12} - add r2, vcpu, #VCPU_USR_REG(0) - pop {r3, r4, r5} @ r0, r1, r2 - stm r2, {r3, r4, r5} - mrs r2, SP_usr - mov r3, lr - str r2, [vcpu, #VCPU_USR_SP] - str r3, [vcpu, #VCPU_USR_LR] - - @ Store return state - mrs r2, ELR_hyp - mrs r3, spsr - str r2, [vcpu, #VCPU_PC] - str r3, [vcpu, #VCPU_CPSR] - - @ Store other guest registers - save_guest_regs_mode svc, #VCPU_SVC_REGS - save_guest_regs_mode abt, #VCPU_ABT_REGS - save_guest_regs_mode und, #VCPU_UND_REGS - save_guest_regs_mode irq, #VCPU_IRQ_REGS -.endm - -/* Reads cp15 registers from hardware and stores them in memory - * @store_to_vcpu: If 0, registers are written in-order to the stack, - * otherwise to the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2 - r12 - */ -.macro read_cp15_state store_to_vcpu - mrc p15, 0, r2, c1, c0, 0 @ SCTLR - mrc p15, 0, r3, c1, c0, 2 @ CPACR - mrc p15, 0, r4, c2, c0, 2 @ TTBCR - mrc p15, 0, r5, c3, c0, 0 @ DACR - mrrc p15, 0, r6, r7, c2 @ TTBR 0 - mrrc p15, 1, r8, r9, c2 @ TTBR 1 - mrc p15, 0, r10, c10, c2, 0 @ PRRR - mrc p15, 0, r11, c10, c2, 1 @ NMRR - mrc p15, 2, r12, c0, c0, 0 @ CSSELR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - str r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - str r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r2, vcpu, #CP15_OFFSET(c2_TTBR0) - strd r6, r7, [r2] - add r2, vcpu, #CP15_OFFSET(c2_TTBR1) - strd r8, r9, [r2] - str r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - str r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mrc p15, 0, r2, c13, c0, 1 @ CID - mrc p15, 0, r3, c13, c0, 2 @ TID_URW - mrc p15, 0, r4, c13, c0, 3 @ TID_URO - mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV - mrc p15, 0, r6, c5, c0, 0 @ DFSR - mrc p15, 0, r7, c5, c0, 1 @ IFSR - mrc p15, 0, r8, c5, c1, 0 @ ADFSR - mrc p15, 0, r9, c5, c1, 1 @ AIFSR - mrc p15, 0, r10, c6, c0, 0 @ DFAR - mrc p15, 0, r11, c6, c0, 2 @ IFAR - mrc p15, 0, r12, c12, c0, 0 @ VBAR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c13_CID)] - str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - str r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - str r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - str r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - str r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - str r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif -.endm - -/* - * Reads cp15 registers from memory and writes them to hardware - * @read_from_vcpu: If 0, registers are read in-order from the stack, - * otherwise from the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - */ -.macro write_cp15_state read_from_vcpu - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c13_CID)] - ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - 
ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif - - mcr p15, 0, r2, c13, c0, 1 @ CID - mcr p15, 0, r3, c13, c0, 2 @ TID_URW - mcr p15, 0, r4, c13, c0, 3 @ TID_URO - mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV - mcr p15, 0, r6, c5, c0, 0 @ DFSR - mcr p15, 0, r7, c5, c0, 1 @ IFSR - mcr p15, 0, r8, c5, c1, 0 @ ADFSR - mcr p15, 0, r9, c5, c1, 1 @ AIFSR - mcr p15, 0, r10, c6, c0, 0 @ DFAR - mcr p15, 0, r11, c6, c0, 2 @ IFAR - mcr p15, 0, r12, c12, c0, 0 @ VBAR - - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r12, vcpu, #CP15_OFFSET(c2_TTBR0) - ldrd r6, r7, [r12] - add r12, vcpu, #CP15_OFFSET(c2_TTBR1) - ldrd r8, r9, [r12] - ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mcr p15, 0, r2, c1, c0, 0 @ SCTLR - mcr p15, 0, r3, c1, c0, 2 @ CPACR - mcr p15, 0, r4, c2, c0, 2 @ TTBCR - mcr p15, 0, r5, c3, c0, 0 @ DACR - mcrr p15, 0, r6, r7, c2 @ TTBR 0 - mcrr p15, 1, r8, r9, c2 @ TTBR 1 - mcr p15, 0, r10, c10, c2, 0 @ PRRR - mcr p15, 0, r11, c10, c2, 1 @ NMRR - mcr p15, 2, r12, c0, c0, 0 @ CSSELR -.endm - -/* - * Save the VGIC CPU state into memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro save_vgic_state -.endm - -/* - * Restore the VGIC CPU state from memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro restore_vgic_state -.endm - -.equ vmentry, 0 -.equ vmexit, 1 - -/* Configures the HSTR (Hyp System Trap Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hstr operation - mrc p15, 4, r2, c1, c1, 3 - ldr r3, =HSTR_T(15) - .if \operation == vmentry - orr r2, r2, r3 @ Trap CR{15} - .else - bic r2, r2, r3 @ Don't trap any CRx accesses - .endif - mcr p15, 4, r2, c1, c1, 3 -.endm - -/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return - * (hardware reset value is 0). Keep previous value in r2. 
*/ -.macro set_hcptr operation, mask - mrc p15, 4, r2, c1, c1, 2 - ldr r3, =\mask - .if \operation == vmentry - orr r3, r2, r3 @ Trap coproc-accesses defined in mask - .else - bic r3, r2, r3 @ Don't trap defined coproc-accesses - .endif - mcr p15, 4, r3, c1, c1, 2 -.endm - -/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hdcr operation - mrc p15, 4, r2, c1, c1, 1 - ldr r3, =(HDCR_TPM|HDCR_TPMCR) - .if \operation == vmentry - orr r2, r2, r3 @ Trap some perfmon accesses - .else - bic r2, r2, r3 @ Don't trap any perfmon accesses - .endif - mcr p15, 4, r2, c1, c1, 1 -.endm - -/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ -.macro configure_hyp_role operation - mrc p15, 4, r2, c1, c1, 0 @ HCR - bic r2, r2, #HCR_VIRT_EXCP_MASK - ldr r3, =HCR_GUEST_MASK - .if \operation == vmentry - orr r2, r2, r3 - ldr r3, [vcpu, #VCPU_IRQ_LINES] - orr r2, r2, r3 - .else - bic r2, r2, r3 - .endif - mcr p15, 4, r2, c1, c1, 0 -.endm - -.macro load_vcpu - mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR -.endm diff --git a/trunk/arch/arm/kvm/mmio.c b/trunk/arch/arm/kvm/mmio.c deleted file mode 100644 index 0144baf82904..000000000000 --- a/trunk/arch/arm/kvm/mmio.c +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include - -#include "trace.h" - -/** - * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation - * @vcpu: The VCPU pointer - * @run: The VCPU run struct containing the mmio data - * - * This should only be called after returning from userspace for MMIO load - * emulation. 
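kvm_handle_mmio_return(), which follows, copies the bytes produced by userspace into the destination register and, for loads narrower than a word, sign-extends them with an xor/subtract trick. A worked example of that step for a one-byte load, as a sketch:

  #include <linux/types.h>

  /* Sign-extend the low 8 bits of a 1-byte MMIO load (len == 1). */
  static u32 example_sign_extend_byte(u32 dest)
  {
          int mask = 1U << ((1 * 8) - 1);  /* 0x80, the sign bit of the byte   */

          return (dest ^ mask) - mask;     /* 0x80 -> 0xffffff80, 0x7f -> 0x7f */
  }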
- */ -int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - __u32 *dest; - unsigned int len; - int mask; - - if (!run->mmio.is_write) { - dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); - memset(dest, 0, sizeof(int)); - - len = run->mmio.len; - if (len > 4) - return -EINVAL; - - memcpy(dest, run->mmio.data, len); - - trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, - *((u64 *)run->mmio.data)); - - if (vcpu->arch.mmio_decode.sign_extend && len < 4) { - mask = 1U << ((len * 8) - 1); - *dest = (*dest ^ mask) - mask; - } - } - - return 0; -} - -static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - struct kvm_exit_mmio *mmio) -{ - unsigned long rt, len; - bool is_write, sign_extend; - - if ((vcpu->arch.hsr >> 8) & 1) { - /* cache operation on I/O addr, tell guest unsupported */ - kvm_inject_dabt(vcpu, vcpu->arch.hxfar); - return 1; - } - - if ((vcpu->arch.hsr >> 7) & 1) { - /* page table accesses IO mem: tell guest to fix its TTBR */ - kvm_inject_dabt(vcpu, vcpu->arch.hxfar); - return 1; - } - - switch ((vcpu->arch.hsr >> 22) & 0x3) { - case 0: - len = 1; - break; - case 1: - len = 2; - break; - case 2: - len = 4; - break; - default: - kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); - return -EFAULT; - } - - is_write = vcpu->arch.hsr & HSR_WNR; - sign_extend = vcpu->arch.hsr & HSR_SSE; - rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT; - - if (kvm_vcpu_reg_is_pc(vcpu, rt)) { - /* IO memory trying to read/write pc */ - kvm_inject_pabt(vcpu, vcpu->arch.hxfar); - return 1; - } - - mmio->is_write = is_write; - mmio->phys_addr = fault_ipa; - mmio->len = len; - vcpu->arch.mmio_decode.sign_extend = sign_extend; - vcpu->arch.mmio_decode.rt = rt; - - /* - * The MMIO instruction is emulated and should not be re-executed - * in the guest. - */ - kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); - return 0; -} - -int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, - phys_addr_t fault_ipa) -{ - struct kvm_exit_mmio mmio; - unsigned long rt; - int ret; - - /* - * Prepare MMIO operation. First stash it in a private - * structure that we can use for in-kernel emulation. If the - * kernel can't handle it, copy it into run->mmio and let user - * space do its magic. - */ - - if (vcpu->arch.hsr & HSR_ISV) { - ret = decode_hsr(vcpu, fault_ipa, &mmio); - if (ret) - return ret; - } else { - kvm_err("load/store instruction decoding not implemented\n"); - return -ENOSYS; - } - - rt = vcpu->arch.mmio_decode.rt; - trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE : - KVM_TRACE_MMIO_READ_UNSATISFIED, - mmio.len, fault_ipa, - (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0); - - if (mmio.is_write) - memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len); - - kvm_prepare_mmio(run, &mmio); - return 0; -} diff --git a/trunk/arch/arm/kvm/mmu.c b/trunk/arch/arm/kvm/mmu.c deleted file mode 100644 index f30e13163a96..000000000000 --- a/trunk/arch/arm/kvm/mmu.c +++ /dev/null @@ -1,787 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "trace.h" - -extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; - -static DEFINE_MUTEX(kvm_hyp_pgd_mutex); - -static void kvm_tlb_flush_vmid(struct kvm *kvm) -{ - kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); -} - -static void kvm_set_pte(pte_t *pte, pte_t new_pte) -{ - pte_val(*pte) = new_pte; - /* - * flush_pmd_entry just takes a void pointer and cleans the necessary - * cache entries, so we can reuse the function for ptes. - */ - flush_pmd_entry(pte); -} - -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, - int min, int max) -{ - void *page; - - BUG_ON(max > KVM_NR_MEM_OBJS); - if (cache->nobjs >= min) - return 0; - while (cache->nobjs < max) { - page = (void *)__get_free_page(PGALLOC_GFP); - if (!page) - return -ENOMEM; - cache->objects[cache->nobjs++] = page; - } - return 0; -} - -static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) -{ - while (mc->nobjs) - free_page((unsigned long)mc->objects[--mc->nobjs]); -} - -static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) -{ - void *p; - - BUG_ON(!mc || !mc->nobjs); - p = mc->objects[--mc->nobjs]; - return p; -} - -static void free_ptes(pmd_t *pmd, unsigned long addr) -{ - pte_t *pte; - unsigned int i; - - for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) { - if (!pmd_none(*pmd) && pmd_table(*pmd)) { - pte = pte_offset_kernel(pmd, addr); - pte_free_kernel(NULL, pte); - } - pmd++; - } -} - -/** - * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables - * - * Assumes this is a page table used strictly in Hyp-mode and therefore contains - * only mappings in the kernel memory area, which is above PAGE_OFFSET. 
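Stepping back to the allocation helpers defined a little earlier in this file: the per-VCPU memory cache exists so that page-table pages can be allocated while sleeping is still allowed and then consumed later under mmu_lock, where allocation would not be safe. A sketch of the intended pattern (the helper name is hypothetical; the fault and ioremap paths further down follow the same shape):

  /* As if placed in this file, next to the helpers above. */
  static int example_topup_then_map(struct kvm *kvm,
                                    struct kvm_mmu_memory_cache *cache)
  {
          int ret;

          ret = mmu_topup_memory_cache(cache, 2, KVM_NR_MEM_OBJS);  /* may sleep */
          if (ret)
                  return ret;

          spin_lock(&kvm->mmu_lock);
          /* ... stage2_set_pte() pulls its pages from the cache via
           *     mmu_memory_cache_alloc() while the lock is held ... */
          spin_unlock(&kvm->mmu_lock);
          return 0;
  }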
- */ -void free_hyp_pmds(void) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - unsigned long addr; - - mutex_lock(&kvm_hyp_pgd_mutex); - for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) { - pgd = hyp_pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); - - if (pud_none(*pud)) - continue; - BUG_ON(pud_bad(*pud)); - - pmd = pmd_offset(pud, addr); - free_ptes(pmd, addr); - pmd_free(NULL, pmd); - pud_clear(pud); - } - mutex_unlock(&kvm_hyp_pgd_mutex); -} - -static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, - unsigned long end) -{ - pte_t *pte; - unsigned long addr; - struct page *page; - - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - pte = pte_offset_kernel(pmd, addr); - BUG_ON(!virt_addr_valid(addr)); - page = virt_to_page(addr); - kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); - } -} - -static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, - unsigned long end, - unsigned long *pfn_base) -{ - pte_t *pte; - unsigned long addr; - - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - pte = pte_offset_kernel(pmd, addr); - BUG_ON(pfn_valid(*pfn_base)); - kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); - (*pfn_base)++; - } -} - -static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, - unsigned long end, unsigned long *pfn_base) -{ - pmd_t *pmd; - pte_t *pte; - unsigned long addr, next; - - for (addr = start; addr < end; addr = next) { - pmd = pmd_offset(pud, addr); - - BUG_ON(pmd_sect(*pmd)); - - if (pmd_none(*pmd)) { - pte = pte_alloc_one_kernel(NULL, addr); - if (!pte) { - kvm_err("Cannot allocate Hyp pte\n"); - return -ENOMEM; - } - pmd_populate_kernel(NULL, pmd, pte); - } - - next = pmd_addr_end(addr, end); - - /* - * If pfn_base is NULL, we map kernel pages into HYP with the - * virtual address. Otherwise, this is considered an I/O - * mapping and we map the physical region starting at - * *pfn_base to [start, end[. - */ - if (!pfn_base) - create_hyp_pte_mappings(pmd, addr, next); - else - create_hyp_io_pte_mappings(pmd, addr, next, pfn_base); - } - - return 0; -} - -static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) -{ - unsigned long start = (unsigned long)from; - unsigned long end = (unsigned long)to; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - unsigned long addr, next; - int err = 0; - - BUG_ON(start > end); - if (start < PAGE_OFFSET) - return -EINVAL; - - mutex_lock(&kvm_hyp_pgd_mutex); - for (addr = start; addr < end; addr = next) { - pgd = hyp_pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); - - if (pud_none_or_clear_bad(pud)) { - pmd = pmd_alloc_one(NULL, addr); - if (!pmd) { - kvm_err("Cannot allocate Hyp pmd\n"); - err = -ENOMEM; - goto out; - } - pud_populate(NULL, pud, pmd); - } - - next = pgd_addr_end(addr, end); - err = create_hyp_pmd_mappings(pud, addr, next, pfn_base); - if (err) - goto out; - } -out: - mutex_unlock(&kvm_hyp_pgd_mutex); - return err; -} - -/** - * create_hyp_mappings - map a kernel virtual address range in Hyp mode - * @from: The virtual kernel start address of the range - * @to: The virtual kernel end address of the range (exclusive) - * - * The same virtual address as the kernel virtual address is also used in - * Hyp-mode mapping to the same underlying physical pages. - * - * Note: Wrapping around zero in the "to" address is not supported. 
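create_hyp_mappings() is the entry point the initialization code uses to make kernel text and data visible at the same virtual addresses inside the Hyp page tables. A hypothetical caller, assuming the Hyp code start/end symbols exported by the accompanying assembly:

  extern char __kvm_hyp_code_start[], __kvm_hyp_code_end[];

  static int example_map_hyp_text(void)
  {
          /* Same kernel VA on both sides; the underlying pages are shared. */
          return create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
  }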
- */ -int create_hyp_mappings(void *from, void *to) -{ - return __create_hyp_mappings(from, to, NULL); -} - -/** - * create_hyp_io_mappings - map a physical IO range in Hyp mode - * @from: The virtual HYP start address of the range - * @to: The virtual HYP end address of the range (exclusive) - * @addr: The physical start address which gets mapped - */ -int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) -{ - unsigned long pfn = __phys_to_pfn(addr); - return __create_hyp_mappings(from, to, &pfn); -} - -/** - * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. - * @kvm: The KVM struct pointer for the VM. - * - * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can - * support either full 40-bit input addresses or limited to 32-bit input - * addresses). Clears the allocated pages. - * - * Note we don't need locking here as this is only called when the VM is - * created, which can only be done once. - */ -int kvm_alloc_stage2_pgd(struct kvm *kvm) -{ - pgd_t *pgd; - - if (kvm->arch.pgd != NULL) { - kvm_err("kvm_arch already initialized?\n"); - return -EINVAL; - } - - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER); - if (!pgd) - return -ENOMEM; - - /* stage-2 pgd must be aligned to its size */ - VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); - - memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); - clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); - kvm->arch.pgd = pgd; - - return 0; -} - -static void clear_pud_entry(pud_t *pud) -{ - pmd_t *pmd_table = pmd_offset(pud, 0); - pud_clear(pud); - pmd_free(NULL, pmd_table); - put_page(virt_to_page(pud)); -} - -static void clear_pmd_entry(pmd_t *pmd) -{ - pte_t *pte_table = pte_offset_kernel(pmd, 0); - pmd_clear(pmd); - pte_free_kernel(NULL, pte_table); - put_page(virt_to_page(pmd)); -} - -static bool pmd_empty(pmd_t *pmd) -{ - struct page *pmd_page = virt_to_page(pmd); - return page_count(pmd_page) == 1; -} - -static void clear_pte_entry(pte_t *pte) -{ - if (pte_present(*pte)) { - kvm_set_pte(pte, __pte(0)); - put_page(virt_to_page(pte)); - } -} - -static bool pte_empty(pte_t *pte) -{ - struct page *pte_page = virt_to_page(pte); - return page_count(pte_page) == 1; -} - -/** - * unmap_stage2_range -- Clear stage2 page table entries to unmap a range - * @kvm: The VM pointer - * @start: The intermediate physical base address of the range to unmap - * @size: The size of the area to unmap - * - * Clear a range of stage-2 mappings, lowering the various ref-counts. Must - * be called while holding mmu_lock (unless for freeing the stage2 pgd before - * destroying the VM), otherwise another faulting VCPU may come in and mess - * with things behind our backs. 
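The locking rule in the comment above is load-bearing: apart from the final teardown in kvm_free_stage2_pgd(), a caller must hold mmu_lock around the unmap so a faulting VCPU cannot repopulate the range underneath it. A hypothetical wrapper that makes the rule explicit:

  static void example_unmap_one_page(struct kvm *kvm, phys_addr_t gpa)
  {
          spin_lock(&kvm->mmu_lock);
          unmap_stage2_range(kvm, gpa, PAGE_SIZE);
          spin_unlock(&kvm->mmu_lock);
  }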
- */ -static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - phys_addr_t addr = start, end = start + size; - u64 range; - - while (addr < end) { - pgd = kvm->arch.pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); - if (pud_none(*pud)) { - addr += PUD_SIZE; - continue; - } - - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) { - addr += PMD_SIZE; - continue; - } - - pte = pte_offset_kernel(pmd, addr); - clear_pte_entry(pte); - range = PAGE_SIZE; - - /* If we emptied the pte, walk back up the ladder */ - if (pte_empty(pte)) { - clear_pmd_entry(pmd); - range = PMD_SIZE; - if (pmd_empty(pmd)) { - clear_pud_entry(pud); - range = PUD_SIZE; - } - } - - addr += range; - } -} - -/** - * kvm_free_stage2_pgd - free all stage-2 tables - * @kvm: The KVM struct pointer for the VM. - * - * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all - * underlying level-2 and level-3 tables before freeing the actual level-1 table - * and setting the struct pointer to NULL. - * - * Note we don't need locking here as this is only called when the VM is - * destroyed, which can only be done once. - */ -void kvm_free_stage2_pgd(struct kvm *kvm) -{ - if (kvm->arch.pgd == NULL) - return; - - unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); - free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); - kvm->arch.pgd = NULL; -} - - -static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, - phys_addr_t addr, const pte_t *new_pte, bool iomap) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte, old_pte; - - /* Create 2nd stage page table mapping - Level 1 */ - pgd = kvm->arch.pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); - if (pud_none(*pud)) { - if (!cache) - return 0; /* ignore calls from kvm_set_spte_hva */ - pmd = mmu_memory_cache_alloc(cache); - pud_populate(NULL, pud, pmd); - pmd += pmd_index(addr); - get_page(virt_to_page(pud)); - } else - pmd = pmd_offset(pud, addr); - - /* Create 2nd stage page table mapping - Level 2 */ - if (pmd_none(*pmd)) { - if (!cache) - return 0; /* ignore calls from kvm_set_spte_hva */ - pte = mmu_memory_cache_alloc(cache); - clean_pte_table(pte); - pmd_populate_kernel(NULL, pmd, pte); - pte += pte_index(addr); - get_page(virt_to_page(pmd)); - } else - pte = pte_offset_kernel(pmd, addr); - - if (iomap && pte_present(*pte)) - return -EFAULT; - - /* Create 2nd stage page table mapping - Level 3 */ - old_pte = *pte; - kvm_set_pte(pte, *new_pte); - if (pte_present(old_pte)) - kvm_tlb_flush_vmid(kvm); - else - get_page(virt_to_page(pte)); - - return 0; -} - -/** - * kvm_phys_addr_ioremap - map a device range to guest IPA - * - * @kvm: The KVM pointer - * @guest_ipa: The IPA at which to insert the mapping - * @pa: The physical address of the device - * @size: The size of the mapping - */ -int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, - phys_addr_t pa, unsigned long size) -{ - phys_addr_t addr, end; - int ret = 0; - unsigned long pfn; - struct kvm_mmu_memory_cache cache = { 0, }; - - end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; - pfn = __phys_to_pfn(pa); - - for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { - pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); - - ret = mmu_topup_memory_cache(&cache, 2, 2); - if (ret) - goto out; - spin_lock(&kvm->mmu_lock); - ret = stage2_set_pte(kvm, &cache, addr, &pte, true); - spin_unlock(&kvm->mmu_lock); - if (ret) - goto out; - - pfn++; - } - -out: - mmu_free_memory_cache(&cache); - 
return ret; -} - -static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) -{ - /* - * If we are going to insert an instruction page and the icache is - * either VIPT or PIPT, there is a potential problem where the host - * (or another VM) may have used the same page as this guest, and we - * read incorrect data from the icache. If we're using a PIPT cache, - * we can invalidate just that page, but if we are using a VIPT cache - * we need to invalidate the entire icache - damn shame - as written - * in the ARM ARM (DDI 0406C.b - Page B3-1393). - * - * VIVT caches are tagged using both the ASID and the VMID and doesn't - * need any kind of flushing (DDI 0406C.b - Page B3-1392). - */ - if (icache_is_pipt()) { - unsigned long hva = gfn_to_hva(kvm, gfn); - __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); - } else if (!icache_is_vivt_asid_tagged()) { - /* any kind of VIPT cache */ - __flush_icache_all(); - } -} - -static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - gfn_t gfn, struct kvm_memory_slot *memslot, - unsigned long fault_status) -{ - pte_t new_pte; - pfn_t pfn; - int ret; - bool write_fault, writable; - unsigned long mmu_seq; - struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; - - write_fault = kvm_is_write_fault(vcpu->arch.hsr); - if (fault_status == FSC_PERM && !write_fault) { - kvm_err("Unexpected L2 read permission error\n"); - return -EFAULT; - } - - /* We need minimum second+third level pages */ - ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS); - if (ret) - return ret; - - mmu_seq = vcpu->kvm->mmu_notifier_seq; - /* - * Ensure the read of mmu_notifier_seq happens before we call - * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk - * the page we just got a reference to gets unmapped before we have a - * chance to grab the mmu_lock, which ensure that if the page gets - * unmapped afterwards, the call to kvm_unmap_hva will take it away - * from us again properly. This smp_rmb() interacts with the smp_wmb() - * in kvm_mmu_notifier_invalidate_. - */ - smp_rmb(); - - pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable); - if (is_error_pfn(pfn)) - return -EFAULT; - - new_pte = pfn_pte(pfn, PAGE_S2); - coherent_icache_guest_page(vcpu->kvm, gfn); - - spin_lock(&vcpu->kvm->mmu_lock); - if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) - goto out_unlock; - if (writable) { - pte_val(new_pte) |= L_PTE_S2_RDWR; - kvm_set_pfn_dirty(pfn); - } - stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); - -out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(pfn); - return 0; -} - -/** - * kvm_handle_guest_abort - handles all 2nd stage aborts - * @vcpu: the VCPU pointer - * @run: the kvm_run structure - * - * Any abort that gets to the host is almost guaranteed to be caused by a - * missing second stage translation table entry, which can mean that either the - * guest simply needs more memory and we must allocate an appropriate page or it - * can mean that the guest tried to access I/O memory, which is emulated by user - * space. The distinction is based on the IPA causing the fault and whether this - * memory region has been registered as standard RAM by user space. 
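The handler below first reconstructs the faulting IPA from the HPFAR, whose bits [31:4] hold IPA[39:12]; a left shift by 8 therefore recovers the address. A small worked example, as a sketch:

  #include <linux/types.h>

  /* HPFAR 0x00123450 describes IPA 0x12345000, i.e. guest frame 0x12345. */
  static phys_addr_t example_hpfar_to_ipa(u32 hpfar)
  {
          return ((phys_addr_t)hpfar & HPFAR_MASK) << 8;  /* HPFAR_MASK from the KVM headers */
  }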
- */ -int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - unsigned long hsr_ec; - unsigned long fault_status; - phys_addr_t fault_ipa; - struct kvm_memory_slot *memslot; - bool is_iabt; - gfn_t gfn; - int ret, idx; - - hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; - is_iabt = (hsr_ec == HSR_EC_IABT); - fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8; - - trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, - vcpu->arch.hxfar, fault_ipa); - - /* Check the stage-2 fault is trans. fault or write fault */ - fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); - if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { - kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", - hsr_ec, fault_status); - return -EFAULT; - } - - idx = srcu_read_lock(&vcpu->kvm->srcu); - - gfn = fault_ipa >> PAGE_SHIFT; - if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { - if (is_iabt) { - /* Prefetch Abort on I/O address */ - kvm_inject_pabt(vcpu, vcpu->arch.hxfar); - ret = 1; - goto out_unlock; - } - - if (fault_status != FSC_FAULT) { - kvm_err("Unsupported fault status on io memory: %#lx\n", - fault_status); - ret = -EFAULT; - goto out_unlock; - } - - /* Adjust page offset */ - fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; - ret = io_mem_abort(vcpu, run, fault_ipa); - goto out_unlock; - } - - memslot = gfn_to_memslot(vcpu->kvm, gfn); - if (!memslot->user_alloc) { - kvm_err("non user-alloc memslots not supported\n"); - ret = -EINVAL; - goto out_unlock; - } - - ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status); - if (ret == 0) - ret = 1; -out_unlock: - srcu_read_unlock(&vcpu->kvm->srcu, idx); - return ret; -} - -static void handle_hva_to_gpa(struct kvm *kvm, - unsigned long start, - unsigned long end, - void (*handler)(struct kvm *kvm, - gpa_t gpa, void *data), - void *data) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - - slots = kvm_memslots(kvm); - - /* we only care about the pages that the guest sees */ - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn, gfn_end; - - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn_start, gfn_start+1, ..., gfn_end-1}. 
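Concretely, with 4 KiB pages the rounding below turns a byte range into a half-open range of guest frames. A sketch with made-up numbers (the memslot is hypothetical, with base_gfn 0x1000 backed at host address 0x40000000):

  gfn_t gfn     = hva_to_gfn_memslot(0x40000800, memslot);                 /* 0x1000 */
  gfn_t gfn_end = hva_to_gfn_memslot(0x40001800 + PAGE_SIZE - 1, memslot); /* 0x1002 */
  /* The loop below then visits guest frames 0x1000 and 0x1001, exactly the
   * pages that intersect the hva range [0x40000800, 0x40001800). */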
- */ - gfn = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - - for (; gfn < gfn_end; ++gfn) { - gpa_t gpa = gfn << PAGE_SHIFT; - handler(kvm, gpa, data); - } - } -} - -static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) -{ - unmap_stage2_range(kvm, gpa, PAGE_SIZE); - kvm_tlb_flush_vmid(kvm); -} - -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) -{ - unsigned long end = hva + PAGE_SIZE; - - if (!kvm->arch.pgd) - return 0; - - trace_kvm_unmap_hva(hva); - handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); - return 0; -} - -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end) -{ - if (!kvm->arch.pgd) - return 0; - - trace_kvm_unmap_hva_range(start, end); - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); - return 0; -} - -static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) -{ - pte_t *pte = (pte_t *)data; - - stage2_set_pte(kvm, NULL, gpa, pte, false); -} - - -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) -{ - unsigned long end = hva + PAGE_SIZE; - pte_t stage2_pte; - - if (!kvm->arch.pgd) - return; - - trace_kvm_set_spte_hva(hva); - stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); - handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); -} - -void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) -{ - mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); -} - -phys_addr_t kvm_mmu_get_httbr(void) -{ - VM_BUG_ON(!virt_addr_valid(hyp_pgd)); - return virt_to_phys(hyp_pgd); -} - -int kvm_mmu_init(void) -{ - if (!hyp_pgd) { - kvm_err("Hyp mode PGD not allocated\n"); - return -ENOMEM; - } - - return 0; -} - -/** - * kvm_clear_idmap - remove all idmaps from the hyp pgd - * - * Free the underlying pmds for all pgds in range and clear the pgds (but - * don't free them) afterwards. - */ -void kvm_clear_hyp_idmap(void) -{ - unsigned long addr, end; - unsigned long next; - pgd_t *pgd = hyp_pgd; - pud_t *pud; - pmd_t *pmd; - - addr = virt_to_phys(__hyp_idmap_text_start); - end = virt_to_phys(__hyp_idmap_text_end); - - pgd += pgd_index(addr); - do { - next = pgd_addr_end(addr, end); - if (pgd_none_or_clear_bad(pgd)) - continue; - pud = pud_offset(pgd, addr); - pmd = pmd_offset(pud, addr); - - pud_clear(pud); - clean_pmd_entry(pmd); - pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); - } while (pgd++, addr = next, addr < end); -} diff --git a/trunk/arch/arm/kvm/psci.c b/trunk/arch/arm/kvm/psci.c deleted file mode 100644 index 7ee5bb7a3667..000000000000 --- a/trunk/arch/arm/kvm/psci.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (C) 2012 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include - -#include -#include - -/* - * This is an implementation of the Power State Coordination Interface - * as described in ARM document number ARM DEN 0022A. 
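On the host side, kvm_psci_call() below is meant to be invoked from the HVC/SMC exit path: it returns true when r0 carried a PSCI function number and the request has been handled, so the VCPU can simply be resumed. A sketch of such a caller (the fallback branch is illustrative, not taken from this file):

  static int example_handle_hvc(struct kvm_vcpu *vcpu)
  {
          if (kvm_psci_call(vcpu))
                  return 1;       /* PSCI request handled, resume the guest */

          /* Not PSCI: a real exit handler would fall back to its own HVC
           * emulation or inject a fault; omitted from this sketch. */
          return 1;
  }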
- */ - -static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) -{ - vcpu->arch.pause = true; -} - -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) -{ - struct kvm *kvm = source_vcpu->kvm; - struct kvm_vcpu *vcpu; - wait_queue_head_t *wq; - unsigned long cpu_id; - phys_addr_t target_pc; - - cpu_id = *vcpu_reg(source_vcpu, 1); - if (vcpu_mode_is_32bit(source_vcpu)) - cpu_id &= ~((u32) 0); - - if (cpu_id >= atomic_read(&kvm->online_vcpus)) - return KVM_PSCI_RET_INVAL; - - target_pc = *vcpu_reg(source_vcpu, 2); - - vcpu = kvm_get_vcpu(kvm, cpu_id); - - wq = kvm_arch_vcpu_wq(vcpu); - if (!waitqueue_active(wq)) - return KVM_PSCI_RET_INVAL; - - kvm_reset_vcpu(vcpu); - - /* Gracefully handle Thumb2 entry point */ - if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { - target_pc &= ~((phys_addr_t) 1); - vcpu_set_thumb(vcpu); - } - - *vcpu_pc(vcpu) = target_pc; - vcpu->arch.pause = false; - smp_mb(); /* Make sure the above is visible */ - - wake_up_interruptible(wq); - - return KVM_PSCI_RET_SUCCESS; -} - -/** - * kvm_psci_call - handle PSCI call if r0 value is in range - * @vcpu: Pointer to the VCPU struct - * - * Handle PSCI calls from guests through traps from HVC or SMC instructions. - * The calling convention is similar to SMC calls to the secure world where - * the function number is placed in r0 and this function returns true if the - * function number specified in r0 is withing the PSCI range, and false - * otherwise. - */ -bool kvm_psci_call(struct kvm_vcpu *vcpu) -{ - unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); - unsigned long val; - - switch (psci_fn) { - case KVM_PSCI_FN_CPU_OFF: - kvm_psci_vcpu_off(vcpu); - val = KVM_PSCI_RET_SUCCESS; - break; - case KVM_PSCI_FN_CPU_ON: - val = kvm_psci_vcpu_on(vcpu); - break; - case KVM_PSCI_FN_CPU_SUSPEND: - case KVM_PSCI_FN_MIGRATE: - val = KVM_PSCI_RET_NI; - break; - - default: - return false; - } - - *vcpu_reg(vcpu, 0) = val; - return true; -} diff --git a/trunk/arch/arm/kvm/reset.c b/trunk/arch/arm/kvm/reset.c deleted file mode 100644 index b80256b554cd..000000000000 --- a/trunk/arch/arm/kvm/reset.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/****************************************************************************** - * Cortex-A15 Reset Values - */ - -static const int a15_max_cpu_idx = 3; - -static struct kvm_regs a15_regs_reset = { - .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, -}; - - -/******************************************************************************* - * Exported reset function - */ - -/** - * kvm_reset_vcpu - sets core registers and cp15 registers to reset value - * @vcpu: The VCPU pointer - * - * This function finds the right table above and sets the registers on the - * virtual CPU struct to their architectually defined reset values. - */ -int kvm_reset_vcpu(struct kvm_vcpu *vcpu) -{ - struct kvm_regs *cpu_reset; - - switch (vcpu->arch.target) { - case KVM_ARM_TARGET_CORTEX_A15: - if (vcpu->vcpu_id > a15_max_cpu_idx) - return -EINVAL; - cpu_reset = &a15_regs_reset; - vcpu->arch.midr = read_cpuid_id(); - break; - default: - return -ENODEV; - } - - /* Reset core registers */ - memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs)); - - /* Reset CP15 registers */ - kvm_reset_coprocs(vcpu); - - return 0; -} diff --git a/trunk/arch/arm/kvm/trace.h b/trunk/arch/arm/kvm/trace.h deleted file mode 100644 index a8e73ed5ad5b..000000000000 --- a/trunk/arch/arm/kvm/trace.h +++ /dev/null @@ -1,235 +0,0 @@ -#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_KVM_H - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM kvm - -/* - * Tracepoints for entry/exit to guest - */ -TRACE_EVENT(kvm_entry, - TP_PROTO(unsigned long vcpu_pc), - TP_ARGS(vcpu_pc), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - ), - - TP_printk("PC: 0x%08lx", __entry->vcpu_pc) -); - -TRACE_EVENT(kvm_exit, - TP_PROTO(unsigned long vcpu_pc), - TP_ARGS(vcpu_pc), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - ), - - TP_printk("PC: 0x%08lx", __entry->vcpu_pc) -); - -TRACE_EVENT(kvm_guest_fault, - TP_PROTO(unsigned long vcpu_pc, unsigned long hsr, - unsigned long hxfar, - unsigned long long ipa), - TP_ARGS(vcpu_pc, hsr, hxfar, ipa), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - __field( unsigned long, hsr ) - __field( unsigned long, hxfar ) - __field( unsigned long long, ipa ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - __entry->hsr = hsr; - __entry->hxfar = hxfar; - __entry->ipa = ipa; - ), - - TP_printk("guest fault at PC %#08lx (hxfar %#08lx, " - "ipa %#16llx, hsr %#08lx", - __entry->vcpu_pc, __entry->hxfar, - __entry->ipa, __entry->hsr) -); - -TRACE_EVENT(kvm_irq_line, - TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level), - TP_ARGS(type, vcpu_idx, irq_num, level), - - TP_STRUCT__entry( - __field( unsigned int, type ) - __field( int, vcpu_idx ) - __field( int, irq_num ) - __field( int, level ) - ), - - TP_fast_assign( - __entry->type = type; - __entry->vcpu_idx = vcpu_idx; - __entry->irq_num = irq_num; - __entry->level = level; - ), - - TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d", - (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" : - (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" : - (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? 
"VGIC SPI" : "UNKNOWN", - __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level) -); - -TRACE_EVENT(kvm_mmio_emulate, - TP_PROTO(unsigned long vcpu_pc, unsigned long instr, - unsigned long cpsr), - TP_ARGS(vcpu_pc, instr, cpsr), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - __field( unsigned long, instr ) - __field( unsigned long, cpsr ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - __entry->instr = instr; - __entry->cpsr = cpsr; - ), - - TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)", - __entry->vcpu_pc, __entry->instr, __entry->cpsr) -); - -/* Architecturally implementation defined CP15 register access */ -TRACE_EVENT(kvm_emulate_cp15_imp, - TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn, - unsigned long CRm, unsigned long Op2, bool is_write), - TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write), - - TP_STRUCT__entry( - __field( unsigned int, Op1 ) - __field( unsigned int, Rt1 ) - __field( unsigned int, CRn ) - __field( unsigned int, CRm ) - __field( unsigned int, Op2 ) - __field( bool, is_write ) - ), - - TP_fast_assign( - __entry->is_write = is_write; - __entry->Op1 = Op1; - __entry->Rt1 = Rt1; - __entry->CRn = CRn; - __entry->CRm = CRm; - __entry->Op2 = Op2; - ), - - TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u", - (__entry->is_write) ? "mcr" : "mrc", - __entry->Op1, __entry->Rt1, __entry->CRn, - __entry->CRm, __entry->Op2) -); - -TRACE_EVENT(kvm_wfi, - TP_PROTO(unsigned long vcpu_pc), - TP_ARGS(vcpu_pc), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - ), - - TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc) -); - -TRACE_EVENT(kvm_unmap_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) -); - -TRACE_EVENT(kvm_unmap_hva_range, - TP_PROTO(unsigned long start, unsigned long end), - TP_ARGS(start, end), - - TP_STRUCT__entry( - __field( unsigned long, start ) - __field( unsigned long, end ) - ), - - TP_fast_assign( - __entry->start = start; - __entry->end = end; - ), - - TP_printk("mmu notifier unmap range: %#08lx -- %#08lx", - __entry->start, __entry->end) -); - -TRACE_EVENT(kvm_set_spte_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) -); - -TRACE_EVENT(kvm_hvc, - TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), - TP_ARGS(vcpu_pc, r0, imm), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - __field( unsigned long, r0 ) - __field( unsigned long, imm ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - __entry->r0 = r0; - __entry->imm = imm; - ), - - TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx", - __entry->vcpu_pc, __entry->r0, __entry->imm) -); - -#endif /* _TRACE_KVM_H */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH arch/arm/kvm -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE trace - -/* This part must be outside protection */ -#include diff --git a/trunk/arch/arm/mach-at91/setup.c b/trunk/arch/arm/mach-at91/setup.c index 4b678478cf95..9ee866ce0478 100644 --- a/trunk/arch/arm/mach-at91/setup.c +++ b/trunk/arch/arm/mach-at91/setup.c @@ -105,8 +105,6 @@ static void __init soc_detect(u32 dbgu_base) switch (socid) { case 
ARCH_ID_AT91RM9200: at91_soc_initdata.type = AT91_SOC_RM9200; - if (at91_soc_initdata.subtype == AT91_SOC_SUBTYPE_NONE) - at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA; at91_boot_soc = at91rm9200_soc; break; diff --git a/trunk/arch/arm/mach-exynos/Kconfig b/trunk/arch/arm/mach-exynos/Kconfig index 85afb031b676..e103c290bc9e 100644 --- a/trunk/arch/arm/mach-exynos/Kconfig +++ b/trunk/arch/arm/mach-exynos/Kconfig @@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT select CPU_EXYNOS4210 select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD select PINCTRL - select PINCTRL_EXYNOS + select PINCTRL_EXYNOS4 select USE_OF help Machine support for Samsung Exynos4 machine with device tree enabled. diff --git a/trunk/arch/arm/mach-imx/Kconfig b/trunk/arch/arm/mach-imx/Kconfig index 0a2349dc7018..3e628fd7a674 100644 --- a/trunk/arch/arm/mach-imx/Kconfig +++ b/trunk/arch/arm/mach-imx/Kconfig @@ -851,7 +851,6 @@ config SOC_IMX6Q select HAVE_CAN_FLEXCAN if CAN select HAVE_IMX_GPC select HAVE_IMX_MMDC - select HAVE_IMX_SRC select HAVE_SMP select MFD_SYSCON select PINCTRL diff --git a/trunk/arch/arm/mach-imx/clk-imx25.c b/trunk/arch/arm/mach-imx/clk-imx25.c index 2c570cdaae7b..b197aa73dc4b 100644 --- a/trunk/arch/arm/mach-imx/clk-imx25.c +++ b/trunk/arch/arm/mach-imx/clk-imx25.c @@ -254,9 +254,9 @@ int __init mx25_clocks_init(void) clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2"); clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2"); - clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27"); - clk_register_clkdev(clk[usbotg_ahb], "ahb", "imx-udc-mx27"); - clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27"); + clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); + clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc"); + clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); clk_register_clkdev(clk[nfc_ipg_per], NULL, "imx25-nand.0"); /* i.mx25 has the i.mx35 type cspi */ clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0"); diff --git a/trunk/arch/arm/mach-imx/clk-imx27.c b/trunk/arch/arm/mach-imx/clk-imx27.c index 1ffe3b534e51..4c1d1e4efc74 100644 --- a/trunk/arch/arm/mach-imx/clk-imx27.c +++ b/trunk/arch/arm/mach-imx/clk-imx27.c @@ -236,9 +236,9 @@ int __init mx27_clocks_init(unsigned long fref) clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx21-fb.0"); clk_register_clkdev(clk[csi_ahb_gate], "ahb", "imx27-camera.0"); clk_register_clkdev(clk[per4_gate], "per", "imx27-camera.0"); - clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27"); - clk_register_clkdev(clk[usb_ipg_gate], "ipg", "imx-udc-mx27"); - clk_register_clkdev(clk[usb_ahb_gate], "ahb", "imx-udc-mx27"); + clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); + clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc"); + clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc"); clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0"); clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0"); clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0"); diff --git a/trunk/arch/arm/mach-imx/clk-imx31.c b/trunk/arch/arm/mach-imx/clk-imx31.c index 16ccbd41dea9..8be64e0a4ace 100644 --- a/trunk/arch/arm/mach-imx/clk-imx31.c +++ b/trunk/arch/arm/mach-imx/clk-imx31.c @@ -139,9 +139,9 @@ int __init mx31_clocks_init(unsigned long fref) clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2"); clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); - clk_register_clkdev(clk[usb_div_post], "per", "imx-udc-mx27"); - 
clk_register_clkdev(clk[usb_gate], "ahb", "imx-udc-mx27"); - clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27"); + clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc"); + clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc"); + clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); /* i.mx31 has the i.mx21 type uart */ clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0"); diff --git a/trunk/arch/arm/mach-imx/clk-imx35.c b/trunk/arch/arm/mach-imx/clk-imx35.c index f0727e80815d..66f3d65ea275 100644 --- a/trunk/arch/arm/mach-imx/clk-imx35.c +++ b/trunk/arch/arm/mach-imx/clk-imx35.c @@ -251,9 +251,9 @@ int __init mx35_clocks_init() clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2"); clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2"); clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2"); - clk_register_clkdev(clk[usb_div], "per", "imx-udc-mx27"); - clk_register_clkdev(clk[ipg], "ipg", "imx-udc-mx27"); - clk_register_clkdev(clk[usbotg_gate], "ahb", "imx-udc-mx27"); + clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc"); + clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc"); + clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc"); clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0"); clk_register_clkdev(clk[nfc_div], NULL, "imx25-nand.0"); clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0"); diff --git a/trunk/arch/arm/mach-imx/clk-imx51-imx53.c b/trunk/arch/arm/mach-imx/clk-imx51-imx53.c index fb7cb841b64c..579023f59dc1 100644 --- a/trunk/arch/arm/mach-imx/clk-imx51-imx53.c +++ b/trunk/arch/arm/mach-imx/clk-imx51-imx53.c @@ -269,9 +269,9 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil, clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2"); clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2"); clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2"); - clk_register_clkdev(clk[usboh3_per_gate], "per", "imx-udc-mx51"); - clk_register_clkdev(clk[usboh3_gate], "ipg", "imx-udc-mx51"); - clk_register_clkdev(clk[usboh3_gate], "ahb", "imx-udc-mx51"); + clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc"); + clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc"); + clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc"); clk_register_clkdev(clk[nfc_gate], NULL, "imx51-nand"); clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0"); clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1"); diff --git a/trunk/arch/arm/mach-imx/clk-imx6q.c b/trunk/arch/arm/mach-imx/clk-imx6q.c index c0c4e723b7f5..7f2c10c7413a 100644 --- a/trunk/arch/arm/mach-imx/clk-imx6q.c +++ b/trunk/arch/arm/mach-imx/clk-imx6q.c @@ -436,9 +436,6 @@ int __init mx6q_clocks_init(void) for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) clk_prepare_enable(clk[clks_init_on[i]]); - /* Set initial power mode */ - imx6q_set_lpm(WAIT_CLOCKED); - np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); base = of_iomap(np, 0); WARN_ON(!base); diff --git a/trunk/arch/arm/mach-imx/common.h b/trunk/arch/arm/mach-imx/common.h index fa36fb84ab19..7191ab4434e5 100644 --- a/trunk/arch/arm/mach-imx/common.h +++ b/trunk/arch/arm/mach-imx/common.h @@ -142,7 +142,6 @@ extern int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode); extern void imx6q_clock_map_io(void); extern void imx_cpu_die(unsigned int cpu); -extern int imx_cpu_kill(unsigned int cpu); #ifdef CONFIG_PM extern void imx6q_pm_init(void); diff --git a/trunk/arch/arm/mach-imx/devices/devices-common.h 
b/trunk/arch/arm/mach-imx/devices/devices-common.h index 9bd5777ff0e7..6277baf1b7be 100644 --- a/trunk/arch/arm/mach-imx/devices/devices-common.h +++ b/trunk/arch/arm/mach-imx/devices/devices-common.h @@ -63,7 +63,6 @@ struct platform_device *__init imx_add_flexcan( #include struct imx_fsl_usb2_udc_data { - const char *devid; resource_size_t iobase; resource_size_t irq; }; diff --git a/trunk/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c b/trunk/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c index 3c06bd96e9cc..37e44398197b 100644 --- a/trunk/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c +++ b/trunk/arch/arm/mach-imx/devices/platform-fsl-usb2-udc.c @@ -11,36 +11,35 @@ #include "../hardware.h" #include "devices-common.h" -#define imx_fsl_usb2_udc_data_entry_single(soc, _devid) \ +#define imx_fsl_usb2_udc_data_entry_single(soc) \ { \ - .devid = _devid, \ .iobase = soc ## _USB_OTG_BASE_ADDR, \ .irq = soc ## _INT_USB_OTG, \ } #ifdef CONFIG_SOC_IMX25 const struct imx_fsl_usb2_udc_data imx25_fsl_usb2_udc_data __initconst = - imx_fsl_usb2_udc_data_entry_single(MX25, "imx-udc-mx27"); + imx_fsl_usb2_udc_data_entry_single(MX25); #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_fsl_usb2_udc_data imx27_fsl_usb2_udc_data __initconst = - imx_fsl_usb2_udc_data_entry_single(MX27, "imx-udc-mx27"); + imx_fsl_usb2_udc_data_entry_single(MX27); #endif /* ifdef CONFIG_SOC_IMX27 */ #ifdef CONFIG_SOC_IMX31 const struct imx_fsl_usb2_udc_data imx31_fsl_usb2_udc_data __initconst = - imx_fsl_usb2_udc_data_entry_single(MX31, "imx-udc-mx27"); + imx_fsl_usb2_udc_data_entry_single(MX31); #endif /* ifdef CONFIG_SOC_IMX31 */ #ifdef CONFIG_SOC_IMX35 const struct imx_fsl_usb2_udc_data imx35_fsl_usb2_udc_data __initconst = - imx_fsl_usb2_udc_data_entry_single(MX35, "imx-udc-mx27"); + imx_fsl_usb2_udc_data_entry_single(MX35); #endif /* ifdef CONFIG_SOC_IMX35 */ #ifdef CONFIG_SOC_IMX51 const struct imx_fsl_usb2_udc_data imx51_fsl_usb2_udc_data __initconst = - imx_fsl_usb2_udc_data_entry_single(MX51, "imx-udc-mx51"); + imx_fsl_usb2_udc_data_entry_single(MX51); #endif struct platform_device *__init imx_add_fsl_usb2_udc( @@ -58,7 +57,7 @@ struct platform_device *__init imx_add_fsl_usb2_udc( .flags = IORESOURCE_IRQ, }, }; - return imx_add_platform_device_dmamask(data->devid, -1, + return imx_add_platform_device_dmamask("fsl-usb2-udc", -1, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); } diff --git a/trunk/arch/arm/mach-imx/devices/platform-imx-fb.c b/trunk/arch/arm/mach-imx/devices/platform-imx-fb.c index 25a47c616b2d..10b0ed39f07f 100644 --- a/trunk/arch/arm/mach-imx/devices/platform-imx-fb.c +++ b/trunk/arch/arm/mach-imx/devices/platform-imx-fb.c @@ -54,7 +54,7 @@ struct platform_device *__init imx_add_imx_fb( .flags = IORESOURCE_IRQ, }, }; - return imx_add_platform_device_dmamask(data->devid, 0, + return imx_add_platform_device_dmamask("imx-fb", 0, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); } diff --git a/trunk/arch/arm/mach-imx/hotplug.c b/trunk/arch/arm/mach-imx/hotplug.c index 7bc5fe15dda2..3dec962b0770 100644 --- a/trunk/arch/arm/mach-imx/hotplug.c +++ b/trunk/arch/arm/mach-imx/hotplug.c @@ -46,11 +46,9 @@ static inline void cpu_enter_lowpower(void) void imx_cpu_die(unsigned int cpu) { cpu_enter_lowpower(); - cpu_do_idle(); -} - -int imx_cpu_kill(unsigned int cpu) -{ imx_enable_cpu(cpu, false); - return 1; + + /* spin here until hardware takes it down */ + while (1) + ; } diff --git a/trunk/include/linux/platform_data/imx-iram.h 
b/trunk/arch/arm/mach-imx/iram.h similarity index 100% rename from trunk/include/linux/platform_data/imx-iram.h rename to trunk/arch/arm/mach-imx/iram.h diff --git a/trunk/arch/arm/mach-imx/iram_alloc.c b/trunk/arch/arm/mach-imx/iram_alloc.c index e05cf407db65..6c80424f678e 100644 --- a/trunk/arch/arm/mach-imx/iram_alloc.c +++ b/trunk/arch/arm/mach-imx/iram_alloc.c @@ -22,7 +22,8 @@ #include #include #include -#include "linux/platform_data/imx-iram.h" + +#include "iram.h" static unsigned long iram_phys_base; static void __iomem *iram_virt_base; diff --git a/trunk/arch/arm/mach-imx/platsmp.c b/trunk/arch/arm/mach-imx/platsmp.c index 66fae885c842..3777b805b76b 100644 --- a/trunk/arch/arm/mach-imx/platsmp.c +++ b/trunk/arch/arm/mach-imx/platsmp.c @@ -92,6 +92,5 @@ struct smp_operations imx_smp_ops __initdata = { .smp_boot_secondary = imx_boot_secondary, #ifdef CONFIG_HOTPLUG_CPU .cpu_die = imx_cpu_die, - .cpu_kill = imx_cpu_kill, #endif }; diff --git a/trunk/arch/arm/mach-imx/pm-imx6q.c b/trunk/arch/arm/mach-imx/pm-imx6q.c index ee42d20cba19..a17543da602d 100644 --- a/trunk/arch/arm/mach-imx/pm-imx6q.c +++ b/trunk/arch/arm/mach-imx/pm-imx6q.c @@ -41,7 +41,6 @@ static int imx6q_pm_enter(suspend_state_t state) cpu_suspend(0, imx6q_suspend_finish); imx_smp_prepare(); imx_gpc_post_resume(); - imx6q_set_lpm(WAIT_CLOCKED); break; default: return -EINVAL; diff --git a/trunk/arch/arm/mach-integrator/pci_v3.c b/trunk/arch/arm/mach-integrator/pci_v3.c index e7fcea7f3300..be50e795536d 100644 --- a/trunk/arch/arm/mach-integrator/pci_v3.c +++ b/trunk/arch/arm/mach-integrator/pci_v3.c @@ -475,12 +475,13 @@ int __init pci_v3_setup(int nr, struct pci_sys_data *sys) { int ret = 0; - if (!ap_syscon_base) - return -EINVAL; - if (nr == 0) { sys->mem_offset = PHYS_PCI_MEM_BASE; ret = pci_v3_setup_resources(sys); + /* Remap the Integrator system controller */ + ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100); + if (!ap_syscon_base) + return -EINVAL; } return ret; @@ -496,13 +497,6 @@ void __init pci_v3_preinit(void) unsigned int temp; int ret; - /* Remap the Integrator system controller */ - ap_syscon_base = ioremap(INTEGRATOR_SC_BASE, 0x100); - if (!ap_syscon_base) { - pr_err("unable to remap the AP syscon for PCIv3\n"); - return; - } - pcibios_min_mem = 0x00100000; /* diff --git a/trunk/arch/arm/mach-kirkwood/board-ns2.c b/trunk/arch/arm/mach-kirkwood/board-ns2.c index f4632a809f68..8821720ab5a4 100644 --- a/trunk/arch/arm/mach-kirkwood/board-ns2.c +++ b/trunk/arch/arm/mach-kirkwood/board-ns2.c @@ -18,11 +18,47 @@ #include #include #include "common.h" +#include "mpp.h" static struct mv643xx_eth_platform_data ns2_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; +static unsigned int ns2_mpp_config[] __initdata = { + MPP0_SPI_SCn, + MPP1_SPI_MOSI, + MPP2_SPI_SCK, + MPP3_SPI_MISO, + MPP4_NF_IO6, + MPP5_NF_IO7, + MPP6_SYSRST_OUTn, + MPP7_GPO, /* Fan speed (bit 1) */ + MPP8_TW0_SDA, + MPP9_TW0_SCK, + MPP10_UART0_TXD, + MPP11_UART0_RXD, + MPP12_GPO, /* Red led */ + MPP14_GPIO, /* USB fuse */ + MPP16_GPIO, /* SATA 0 power */ + MPP17_GPIO, /* SATA 1 power */ + MPP18_NF_IO0, + MPP19_NF_IO1, + MPP20_SATA1_ACTn, + MPP21_SATA0_ACTn, + MPP22_GPIO, /* Fan speed (bit 0) */ + MPP23_GPIO, /* Fan power */ + MPP24_GPIO, /* USB mode select */ + MPP25_GPIO, /* Fan rotation fail */ + MPP26_GPIO, /* USB device vbus */ + MPP28_GPIO, /* USB enable host vbus */ + MPP29_GPIO, /* Blue led (slow register) */ + MPP30_GPIO, /* Blue led (command register) */ + MPP31_GPIO, /* Board power off */ + MPP32_GPIO, /* Power button (0 = 
Released, 1 = Pushed) */ + MPP33_GPO, /* Fan speed (bit 2) */ + 0 +}; + #define NS2_GPIO_POWER_OFF 31 static void ns2_power_off(void) @@ -35,6 +71,8 @@ void __init ns2_init(void) /* * Basic setup. Needs to be called early. */ + kirkwood_mpp_conf(ns2_mpp_config); + if (of_machine_is_compatible("lacie,netspace_lite_v2") || of_machine_is_compatible("lacie,netspace_mini_v2")) ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0); diff --git a/trunk/arch/arm/mach-mvebu/Makefile b/trunk/arch/arm/mach-mvebu/Makefile index 99df4df680fd..5dcb369b58aa 100644 --- a/trunk/arch/arm/mach-mvebu/Makefile +++ b/trunk/arch/arm/mach-mvebu/Makefile @@ -1,8 +1,6 @@ ccflags-$(CONFIG_ARCH_MULTIPLATFORM) := -I$(srctree)/$(src)/include \ -I$(srctree)/arch/arm/plat-orion/include -AFLAGS_coherency_ll.o := -Wa,-march=armv7-a - obj-y += system-controller.o obj-$(CONFIG_MACH_ARMADA_370_XP) += armada-370-xp.o irq-armada-370-xp.o addr-map.o coherency.o coherency_ll.o pmsu.o obj-$(CONFIG_SMP) += platsmp.o headsmp.o diff --git a/trunk/arch/arm/mach-omap2/board-omap4panda.c b/trunk/arch/arm/mach-omap2/board-omap4panda.c index 769c1feee1c4..5c8e9cee2c2e 100644 --- a/trunk/arch/arm/mach-omap2/board-omap4panda.c +++ b/trunk/arch/arm/mach-omap2/board-omap4panda.c @@ -397,12 +397,6 @@ static struct omap_board_mux board_mux[] __initdata = { OMAP_PULL_ENA), OMAP4_MUX(ABE_MCBSP1_FSX, OMAP_MUX_MODE0 | OMAP_PIN_INPUT), - /* UART2 - BT/FM/GPS shared transport */ - OMAP4_MUX(UART2_CTS, OMAP_PIN_INPUT | OMAP_MUX_MODE0), - OMAP4_MUX(UART2_RTS, OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), - OMAP4_MUX(UART2_RX, OMAP_PIN_INPUT | OMAP_MUX_MODE0), - OMAP4_MUX(UART2_TX, OMAP_PIN_OUTPUT | OMAP_MUX_MODE0), - { .reg_offset = OMAP_MUX_TERMINATOR }, }; diff --git a/trunk/arch/arm/mach-omap2/cclock2420_data.c b/trunk/arch/arm/mach-omap2/cclock2420_data.c index ab7e952d2070..7e5febe456d9 100644 --- a/trunk/arch/arm/mach-omap2/cclock2420_data.c +++ b/trunk/arch/arm/mach-omap2/cclock2420_data.c @@ -1935,8 +1935,6 @@ int __init omap2420_clk_init(void) omap2_init_clk_hw_omap_clocks(c->lk.clk); } - omap2xxx_clkt_vps_late_init(); - omap2_clk_disable_autoidle_all(); omap2_clk_enable_init_clocks(enable_init_clks, diff --git a/trunk/arch/arm/mach-omap2/cclock2430_data.c b/trunk/arch/arm/mach-omap2/cclock2430_data.c index eb3dab68d536..eda079b96c6a 100644 --- a/trunk/arch/arm/mach-omap2/cclock2430_data.c +++ b/trunk/arch/arm/mach-omap2/cclock2430_data.c @@ -2050,8 +2050,6 @@ int __init omap2430_clk_init(void) omap2_init_clk_hw_omap_clocks(c->lk.clk); } - omap2xxx_clkt_vps_late_init(); - omap2_clk_disable_autoidle_all(); omap2_clk_enable_init_clocks(enable_init_clks, diff --git a/trunk/arch/arm/mach-omap2/cclock44xx_data.c b/trunk/arch/arm/mach-omap2/cclock44xx_data.c index a2cc046b47f4..5789a5e25563 100644 --- a/trunk/arch/arm/mach-omap2/cclock44xx_data.c +++ b/trunk/arch/arm/mach-omap2/cclock44xx_data.c @@ -2026,13 +2026,14 @@ int __init omap4xxx_clk_init(void) * On OMAP4460 the ABE DPLL fails to turn on if in idle low-power * state when turning the ABE clock domain. Workaround this by * locking the ABE DPLL on boot. - * Lock the ABE DPLL in any case to avoid issues with audio. 
*/ - rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); - if (!rc) - rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); - if (rc) - pr_err("%s: failed to configure ABE DPLL!\n", __func__); + if (cpu_is_omap446x()) { + rc = clk_set_parent(&abe_dpll_refclk_mux_ck, &sys_32k_ck); + if (!rc) + rc = clk_set_rate(&dpll_abe_ck, OMAP4_DPLL_ABE_DEFFREQ); + if (rc) + pr_err("%s: failed to configure ABE DPLL!\n", __func__); + } return 0; } diff --git a/trunk/arch/arm/mach-omap2/devices.c b/trunk/arch/arm/mach-omap2/devices.c index 626f3ea3142f..5e304d0719a2 100644 --- a/trunk/arch/arm/mach-omap2/devices.c +++ b/trunk/arch/arm/mach-omap2/devices.c @@ -639,7 +639,7 @@ static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev) return cnt; } -static void __init omap_init_ocp2scp(void) +static void omap_init_ocp2scp(void) { struct omap_hwmod *oh; struct platform_device *pdev; diff --git a/trunk/arch/arm/mach-omap2/drm.c b/trunk/arch/arm/mach-omap2/drm.c index 2a2cfa88ddbf..4c7566c7e24a 100644 --- a/trunk/arch/arm/mach-omap2/drm.c +++ b/trunk/arch/arm/mach-omap2/drm.c @@ -25,7 +25,6 @@ #include #include -#include "soc.h" #include "omap_device.h" #include "omap_hwmod.h" @@ -57,7 +56,7 @@ static int __init omap_init_drm(void) oh->name); } - platform_data.omaprev = GET_OMAP_TYPE; + platform_data.omaprev = GET_OMAP_REVISION(); return platform_device_register(&omap_drm_device); diff --git a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c index 793f54ac7d14..129d5081ed15 100644 --- a/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c +++ b/trunk/arch/arm/mach-omap2/omap_hwmod_44xx_data.c @@ -2132,12 +2132,8 @@ static struct omap_hwmod omap44xx_mcpdm_hwmod = { * currently reset very early during boot, before I2C is * available, so it doesn't seem that we have any choice in * the kernel other than to avoid resetting it. - * - * Also, McPDM needs to be configured to NO_IDLE mode when it - * is in used otherwise vital clocks will be gated which - * results 'slow motion' audio playback. 
*/ - .flags = HWMOD_EXT_OPT_MAIN_CLK | HWMOD_SWSUP_SIDLE, + .flags = HWMOD_EXT_OPT_MAIN_CLK, .mpu_irqs = omap44xx_mcpdm_irqs, .sdma_reqs = omap44xx_mcpdm_sdma_reqs, .main_clk = "mcpdm_fck", diff --git a/trunk/arch/arm/mach-omap2/timer.c b/trunk/arch/arm/mach-omap2/timer.c index b8ad6e632bb8..691aa674665a 100644 --- a/trunk/arch/arm/mach-omap2/timer.c +++ b/trunk/arch/arm/mach-omap2/timer.c @@ -165,11 +165,15 @@ static struct device_node * __init omap_get_timer_dt(struct of_device_id *match, struct device_node *np; for_each_matching_node(np, match) { - if (!of_device_is_available(np)) + if (!of_device_is_available(np)) { + of_node_put(np); continue; + } - if (property && !of_get_property(np, property, NULL)) + if (property && !of_get_property(np, property, NULL)) { + of_node_put(np); continue; + } of_add_property(np, &device_disabled); return np; diff --git a/trunk/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h b/trunk/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h index b6132aa95dc0..a611ad3153c7 100644 --- a/trunk/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h +++ b/trunk/arch/arm/mach-pxa/include/mach/mfp-pxa27x.h @@ -463,9 +463,6 @@ GPIO76_LCD_PCLK, \ GPIO77_LCD_BIAS -/* these enable a work-around for a hw bug in pxa27x during ac97 warm reset */ -#define GPIO113_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO113, AF0, DEFAULT) -#define GPIO95_AC97_nRESET_GPIO_HIGH MFP_CFG_OUT(GPIO95, AF0, DEFAULT) extern int keypad_set_wake(unsigned int on); #endif /* __ASM_ARCH_MFP_PXA27X_H */ diff --git a/trunk/arch/arm/mach-pxa/pxa27x.c b/trunk/arch/arm/mach-pxa/pxa27x.c index 616cb87b6179..8047ee0effc5 100644 --- a/trunk/arch/arm/mach-pxa/pxa27x.c +++ b/trunk/arch/arm/mach-pxa/pxa27x.c @@ -47,9 +47,9 @@ void pxa27x_clear_otgph(void) EXPORT_SYMBOL(pxa27x_clear_otgph); static unsigned long ac97_reset_config[] = { - GPIO113_AC97_nRESET_GPIO_HIGH, + GPIO113_GPIO, GPIO113_AC97_nRESET, - GPIO95_AC97_nRESET_GPIO_HIGH, + GPIO95_GPIO, GPIO95_AC97_nRESET, }; diff --git a/trunk/arch/arm/mach-realview/include/mach/irqs-eb.h b/trunk/arch/arm/mach-realview/include/mach/irqs-eb.h index 44754230fdcc..d6b5073692d2 100644 --- a/trunk/arch/arm/mach-realview/include/mach/irqs-eb.h +++ b/trunk/arch/arm/mach-realview/include/mach/irqs-eb.h @@ -115,7 +115,7 @@ /* * Only define NR_IRQS if less than NR_IRQS_EB */ -#define NR_IRQS_EB (IRQ_EB_GIC_START + 128) +#define NR_IRQS_EB (IRQ_EB_GIC_START + 96) #if defined(CONFIG_MACH_REALVIEW_EB) \ && (!defined(NR_IRQS) || (NR_IRQS < NR_IRQS_EB)) diff --git a/trunk/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/trunk/arch/arm/mach-s3c64xx/mach-crag6410-module.c index 755c0bb119f4..553059f51841 100644 --- a/trunk/arch/arm/mach-s3c64xx/mach-crag6410-module.c +++ b/trunk/arch/arm/mach-s3c64xx/mach-crag6410-module.c @@ -47,7 +47,7 @@ static struct spi_board_info wm1253_devs[] = { .bus_num = 0, .chip_select = 0, .mode = SPI_MODE_0, - .irq = S3C_EINT(4), + .irq = S3C_EINT(5), .controller_data = &wm0010_spi_csinfo, .platform_data = &wm0010_pdata, }, diff --git a/trunk/arch/arm/mach-s3c64xx/pm.c b/trunk/arch/arm/mach-s3c64xx/pm.c index d2e1a16690bd..7feb426fc202 100644 --- a/trunk/arch/arm/mach-s3c64xx/pm.c +++ b/trunk/arch/arm/mach-s3c64xx/pm.c @@ -338,10 +338,8 @@ int __init s3c64xx_pm_init(void) for (i = 0; i < ARRAY_SIZE(s3c64xx_pm_domains); i++) pm_genpd_init(&s3c64xx_pm_domains[i]->pd, NULL, false); -#ifdef CONFIG_S3C_DEV_FB if (dev_get_platdata(&s3c_device_fb.dev)) pm_genpd_add_device(&s3c64xx_pm_f.pd, &s3c_device_fb.dev); -#endif return 0; } diff --git a/trunk/arch/arm/mm/dma-mapping.c 
b/trunk/arch/arm/mm/dma-mapping.c index dda3904dc64c..6b2fb87c8698 100644 --- a/trunk/arch/arm/mm/dma-mapping.c +++ b/trunk/arch/arm/mm/dma-mapping.c @@ -640,7 +640,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, if (is_coherent || nommu()) addr = __alloc_simple_buffer(dev, size, gfp, &page); - else if (!(gfp & __GFP_WAIT)) + else if (gfp & GFP_ATOMIC) addr = __alloc_from_pool(size, &page); else if (!IS_ENABLED(CONFIG_CMA)) addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); @@ -774,27 +774,25 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, void (*op)(const void *, size_t, int)) { - unsigned long pfn; - size_t left = size; - - pfn = page_to_pfn(page) + offset / PAGE_SIZE; - offset %= PAGE_SIZE; - /* * A single sg entry may refer to multiple physically contiguous * pages. But we still need to process highmem pages individually. * If highmem is not configured then the bulk of this loop gets * optimized out. */ + size_t left = size; do { size_t len = left; void *vaddr; - page = pfn_to_page(pfn); - if (PageHighMem(page)) { - if (len + offset > PAGE_SIZE) + if (len + offset > PAGE_SIZE) { + if (offset >= PAGE_SIZE) { + page += offset / PAGE_SIZE; + offset %= PAGE_SIZE; + } len = PAGE_SIZE - offset; + } vaddr = kmap_high_get(page); if (vaddr) { vaddr += offset; @@ -811,7 +809,7 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset, op(vaddr, len, dir); } offset = 0; - pfn++; + page++; left -= len; } while (left); } diff --git a/trunk/arch/arm/mm/idmap.c b/trunk/arch/arm/mm/idmap.c index 2dffc010cc41..99db769307ec 100644 --- a/trunk/arch/arm/mm/idmap.c +++ b/trunk/arch/arm/mm/idmap.c @@ -1,6 +1,4 @@ -#include #include -#include #include #include @@ -8,7 +6,6 @@ #include #include #include -#include pgd_t *idmap_pgd; @@ -62,17 +59,11 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, } while (pud++, addr = next, addr != end); } -static void identity_mapping_add(pgd_t *pgd, const char *text_start, - const char *text_end, unsigned long prot) +static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) { - unsigned long addr, end; - unsigned long next; - - addr = virt_to_phys(text_start); - end = virt_to_phys(text_end); - - prot |= PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; + unsigned long prot, next; + prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) prot |= PMD_BIT4; @@ -83,52 +74,28 @@ static void identity_mapping_add(pgd_t *pgd, const char *text_start, } while (pgd++, addr = next, addr != end); } -#if defined(CONFIG_ARM_VIRT_EXT) && defined(CONFIG_ARM_LPAE) -pgd_t *hyp_pgd; - -extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; - -static int __init init_static_idmap_hyp(void) -{ - hyp_pgd = kzalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL); - if (!hyp_pgd) - return -ENOMEM; - - pr_info("Setting up static HYP identity map for 0x%p - 0x%p\n", - __hyp_idmap_text_start, __hyp_idmap_text_end); - identity_mapping_add(hyp_pgd, __hyp_idmap_text_start, - __hyp_idmap_text_end, PMD_SECT_AP1); - - return 0; -} -#else -static int __init init_static_idmap_hyp(void) -{ - return 0; -} -#endif - extern char __idmap_text_start[], __idmap_text_end[]; static int __init init_static_idmap(void) { - int ret; + phys_addr_t idmap_start, idmap_end; idmap_pgd = pgd_alloc(&init_mm); if (!idmap_pgd) return -ENOMEM; - pr_info("Setting up static 
identity map for 0x%p - 0x%p\n", - __idmap_text_start, __idmap_text_end); - identity_mapping_add(idmap_pgd, __idmap_text_start, - __idmap_text_end, 0); + /* Add an identity mapping for the physical address of the section. */ + idmap_start = virt_to_phys((void *)__idmap_text_start); + idmap_end = virt_to_phys((void *)__idmap_text_end); - ret = init_static_idmap_hyp(); + pr_info("Setting up static identity map for 0x%llx - 0x%llx\n", + (long long)idmap_start, (long long)idmap_end); + identity_mapping_add(idmap_pgd, idmap_start, idmap_end); /* Flush L1 for the hardware to see this page table content */ flush_cache_louis(); - return ret; + return 0; } early_initcall(init_static_idmap); diff --git a/trunk/arch/arm/mm/mmu.c b/trunk/arch/arm/mm/mmu.c index 8fcf8bd3ee49..9f0610243bd6 100644 --- a/trunk/arch/arm/mm/mmu.c +++ b/trunk/arch/arm/mm/mmu.c @@ -57,9 +57,6 @@ static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK; static unsigned int ecc_mask __initdata = 0; pgprot_t pgprot_user; pgprot_t pgprot_kernel; -pgprot_t pgprot_hyp_device; -pgprot_t pgprot_s2; -pgprot_t pgprot_s2_device; EXPORT_SYMBOL(pgprot_user); EXPORT_SYMBOL(pgprot_kernel); @@ -69,46 +66,34 @@ struct cachepolicy { unsigned int cr_mask; pmdval_t pmd; pteval_t pte; - pteval_t pte_s2; }; -#ifdef CONFIG_ARM_LPAE -#define s2_policy(policy) policy -#else -#define s2_policy(policy) 0 -#endif - static struct cachepolicy cache_policies[] __initdata = { { .policy = "uncached", .cr_mask = CR_W|CR_C, .pmd = PMD_SECT_UNCACHED, .pte = L_PTE_MT_UNCACHED, - .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED), }, { .policy = "buffered", .cr_mask = CR_C, .pmd = PMD_SECT_BUFFERED, .pte = L_PTE_MT_BUFFERABLE, - .pte_s2 = s2_policy(L_PTE_S2_MT_UNCACHED), }, { .policy = "writethrough", .cr_mask = 0, .pmd = PMD_SECT_WT, .pte = L_PTE_MT_WRITETHROUGH, - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITETHROUGH), }, { .policy = "writeback", .cr_mask = 0, .pmd = PMD_SECT_WB, .pte = L_PTE_MT_WRITEBACK, - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK), }, { .policy = "writealloc", .cr_mask = 0, .pmd = PMD_SECT_WBWA, .pte = L_PTE_MT_WRITEALLOC, - .pte_s2 = s2_policy(L_PTE_S2_MT_WRITEBACK), } }; @@ -298,7 +283,7 @@ static struct mem_type mem_types[] = { }, [MT_MEMORY_SO] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | - L_PTE_MT_UNCACHED | L_PTE_XN, + L_PTE_MT_UNCACHED, .prot_l1 = PMD_TYPE_TABLE, .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S | PMD_SECT_UNCACHED | PMD_SECT_XN, @@ -325,7 +310,6 @@ static void __init build_mem_type_table(void) struct cachepolicy *cp; unsigned int cr = get_cr(); pteval_t user_pgprot, kern_pgprot, vecs_pgprot; - pteval_t hyp_device_pgprot, s2_pgprot, s2_device_pgprot; int cpu_arch = cpu_architecture(); int i; @@ -437,8 +421,6 @@ static void __init build_mem_type_table(void) */ cp = &cache_policies[cachepolicy]; vecs_pgprot = kern_pgprot = user_pgprot = cp->pte; - s2_pgprot = cp->pte_s2; - hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte; /* * ARMv6 and above have extended page tables. 
@@ -462,7 +444,6 @@ static void __init build_mem_type_table(void) user_pgprot |= L_PTE_SHARED; kern_pgprot |= L_PTE_SHARED; vecs_pgprot |= L_PTE_SHARED; - s2_pgprot |= L_PTE_SHARED; mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S; mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED; mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S; @@ -517,9 +498,6 @@ static void __init build_mem_type_table(void) pgprot_user = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot); pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | kern_pgprot); - pgprot_s2 = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | s2_pgprot); - pgprot_s2_device = __pgprot(s2_device_pgprot); - pgprot_hyp_device = __pgprot(hyp_device_pgprot); mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask; mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; diff --git a/trunk/arch/arm/plat-versatile/headsmp.S b/trunk/arch/arm/plat-versatile/headsmp.S index b178d44e9eaa..dd703ef09b8d 100644 --- a/trunk/arch/arm/plat-versatile/headsmp.S +++ b/trunk/arch/arm/plat-versatile/headsmp.S @@ -20,7 +20,7 @@ */ ENTRY(versatile_secondary_startup) mrc p15, 0, r0, c0, c0, 5 - bic r0, #0xff000000 + and r0, r0, #15 adr r4, 1f ldmia r4, {r5, r6} sub r4, r4, r5 diff --git a/trunk/arch/arm/vfp/entry.S b/trunk/arch/arm/vfp/entry.S index 323ce1a62bbf..cc926c985981 100644 --- a/trunk/arch/arm/vfp/entry.S +++ b/trunk/arch/arm/vfp/entry.S @@ -22,7 +22,7 @@ @ IRQs disabled. @ ENTRY(do_vfp) -#ifdef CONFIG_PREEMPT_COUNT +#ifdef CONFIG_PREEMPT ldr r4, [r10, #TI_PREEMPT] @ get preempt count add r11, r4, #1 @ increment it str r11, [r10, #TI_PREEMPT] @@ -35,7 +35,7 @@ ENTRY(do_vfp) ENDPROC(do_vfp) ENTRY(vfp_null_entry) -#ifdef CONFIG_PREEMPT_COUNT +#ifdef CONFIG_PREEMPT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count sub r11, r4, #1 @ decrement it @@ -53,7 +53,7 @@ ENDPROC(vfp_null_entry) __INIT ENTRY(vfp_testing_entry) -#ifdef CONFIG_PREEMPT_COUNT +#ifdef CONFIG_PREEMPT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count sub r11, r4, #1 @ decrement it diff --git a/trunk/arch/arm/vfp/vfphw.S b/trunk/arch/arm/vfp/vfphw.S index dd5e56f95f3f..ea0349f63586 100644 --- a/trunk/arch/arm/vfp/vfphw.S +++ b/trunk/arch/arm/vfp/vfphw.S @@ -168,7 +168,7 @@ vfp_hw_state_valid: @ else it's one 32-bit instruction, so @ always subtract 4 from the following @ instruction address. 
-#ifdef CONFIG_PREEMPT_COUNT +#ifdef CONFIG_PREEMPT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count sub r11, r4, #1 @ decrement it @@ -192,7 +192,7 @@ look_for_VFP_exceptions: @ not recognised by VFP DBGSTR "not VFP" -#ifdef CONFIG_PREEMPT_COUNT +#ifdef CONFIG_PREEMPT get_thread_info r10 ldr r4, [r10, #TI_PREEMPT] @ get preempt count sub r11, r4, #1 @ decrement it diff --git a/trunk/arch/arm64/boot/dts/Makefile b/trunk/arch/arm64/boot/dts/Makefile index 32ac0aef0068..801e2d7fcbc6 100644 --- a/trunk/arch/arm64/boot/dts/Makefile +++ b/trunk/arch/arm64/boot/dts/Makefile @@ -1,5 +1,4 @@ targets += dtbs -targets += $(dtb-y) dtbs: $(addprefix $(obj)/, $(dtb-y)) diff --git a/trunk/arch/arm64/include/asm/elf.h b/trunk/arch/arm64/include/asm/elf.h index fe32c0e4ac01..07fea290d7c1 100644 --- a/trunk/arch/arm64/include/asm/elf.h +++ b/trunk/arch/arm64/include/asm/elf.h @@ -26,10 +26,7 @@ typedef unsigned long elf_greg_t; -#define ELF_NGREG (sizeof(struct user_pt_regs) / sizeof(elf_greg_t)) -#define ELF_CORE_COPY_REGS(dest, regs) \ - *(struct user_pt_regs *)&(dest) = (regs)->user_regs; - +#define ELF_NGREG (sizeof (struct pt_regs) / sizeof(elf_greg_t)) typedef elf_greg_t elf_gregset_t[ELF_NGREG]; typedef struct user_fpsimd_state elf_fpregset_t; diff --git a/trunk/arch/arm64/include/asm/pgtable.h b/trunk/arch/arm64/include/asm/pgtable.h index e333a243bfcc..64b133949502 100644 --- a/trunk/arch/arm64/include/asm/pgtable.h +++ b/trunk/arch/arm64/include/asm/pgtable.h @@ -24,8 +24,7 @@ /* * Software defined PTE bits definition. */ -#define PTE_VALID (_AT(pteval_t, 1) << 0) -#define PTE_PROT_NONE (_AT(pteval_t, 1) << 1) /* only when !PTE_VALID */ +#define PTE_VALID (_AT(pteval_t, 1) << 0) /* pte_present() check */ #define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */ #define PTE_DIRTY (_AT(pteval_t, 1) << 55) #define PTE_SPECIAL (_AT(pteval_t, 1) << 56) @@ -61,12 +60,9 @@ extern void __pgd_error(const char *file, int line, unsigned long val); extern pgprot_t pgprot_default; -#define __pgprot_modify(prot,mask,bits) \ - __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) - -#define _MOD_PROT(p, b) __pgprot_modify(p, 0, b) +#define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) -#define PAGE_NONE __pgprot_modify(pgprot_default, PTE_TYPE_MASK, PTE_PROT_NONE) +#define PAGE_NONE _MOD_PROT(pgprot_default, PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) #define PAGE_SHARED _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) #define PAGE_SHARED_EXEC _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN) #define PAGE_COPY _MOD_PROT(pgprot_default, PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) @@ -76,7 +72,7 @@ extern pgprot_t pgprot_default; #define PAGE_KERNEL _MOD_PROT(pgprot_default, PTE_PXN | PTE_UXN | PTE_DIRTY) #define PAGE_KERNEL_EXEC _MOD_PROT(pgprot_default, PTE_UXN | PTE_DIRTY) -#define __PAGE_NONE __pgprot(((_PAGE_DEFAULT) & ~PTE_TYPE_MASK) | PTE_PROT_NONE) +#define __PAGE_NONE __pgprot(_PAGE_DEFAULT | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) #define __PAGE_SHARED __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN) #define __PAGE_SHARED_EXEC __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN) #define __PAGE_COPY __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_NG | PTE_PXN | PTE_UXN | PTE_RDONLY) @@ -129,15 +125,16 @@ extern struct page *empty_zero_page; /* * The following only work if pte_present(). Undefined behaviour otherwise. 
*/ -#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) +#define pte_present(pte) (pte_val(pte) & PTE_VALID) #define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY) #define pte_young(pte) (pte_val(pte) & PTE_AF) #define pte_special(pte) (pte_val(pte) & PTE_SPECIAL) #define pte_write(pte) (!(pte_val(pte) & PTE_RDONLY)) #define pte_exec(pte) (!(pte_val(pte) & PTE_UXN)) -#define pte_valid_user(pte) \ - ((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER)) +#define pte_present_exec_user(pte) \ + ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == \ + (PTE_VALID | PTE_USER)) #define PTE_BIT_FUNC(fn,op) \ static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } @@ -160,13 +157,10 @@ extern void __sync_icache_dcache(pte_t pteval, unsigned long addr); static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte) { - if (pte_valid_user(pte)) { - if (pte_exec(pte)) - __sync_icache_dcache(pte, addr); - if (!pte_dirty(pte)) - pte = pte_wrprotect(pte); - } - + if (pte_present_exec_user(pte)) + __sync_icache_dcache(pte, addr); + if (!pte_dirty(pte)) + pte = pte_wrprotect(pte); set_pte(ptep, pte); } @@ -176,6 +170,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, #define pte_huge(pte) ((pte_val(pte) & PTE_TYPE_MASK) == PTE_TYPE_HUGEPAGE) #define pte_mkhuge(pte) (__pte((pte_val(pte) & ~PTE_TYPE_MASK) | PTE_TYPE_HUGEPAGE)) +#define __pgprot_modify(prot,mask,bits) \ + __pgprot((pgprot_val(prot) & ~(mask)) | (bits)) + #define __HAVE_ARCH_PTE_SPECIAL /* @@ -267,8 +264,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) { - const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY | - PTE_PROT_NONE | PTE_VALID; + const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY; pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask); return pte; } diff --git a/trunk/arch/arm64/include/asm/unistd32.h b/trunk/arch/arm64/include/asm/unistd32.h index 5ef47ba3ed45..58432625fdb3 100644 --- a/trunk/arch/arm64/include/asm/unistd32.h +++ b/trunk/arch/arm64/include/asm/unistd32.h @@ -395,13 +395,8 @@ __SYSCALL(370, sys_name_to_handle_at) __SYSCALL(371, compat_sys_open_by_handle_at) __SYSCALL(372, compat_sys_clock_adjtime) __SYSCALL(373, sys_syncfs) -__SYSCALL(374, compat_sys_sendmmsg) -__SYSCALL(375, sys_setns) -__SYSCALL(376, compat_sys_process_vm_readv) -__SYSCALL(377, compat_sys_process_vm_writev) -__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */ -#define __NR_compat_syscalls 379 +#define __NR_compat_syscalls 374 /* * Compat syscall numbers used by the AArch64 kernel. diff --git a/trunk/arch/arm64/kernel/vdso.c b/trunk/arch/arm64/kernel/vdso.c index 6a389dc1bd49..c958cb84d75f 100644 --- a/trunk/arch/arm64/kernel/vdso.c +++ b/trunk/arch/arm64/kernel/vdso.c @@ -252,6 +252,10 @@ void update_vsyscall(struct timekeeper *tk) void update_vsyscall_tz(void) { + ++vdso_data->tb_seq_count; + smp_wmb(); vdso_data->tz_minuteswest = sys_tz.tz_minuteswest; vdso_data->tz_dsttime = sys_tz.tz_dsttime; + smp_wmb(); + ++vdso_data->tb_seq_count; } diff --git a/trunk/arch/arm64/kernel/vdso/gettimeofday.S b/trunk/arch/arm64/kernel/vdso/gettimeofday.S index f0a6d10b5211..8bf658d974f9 100644 --- a/trunk/arch/arm64/kernel/vdso/gettimeofday.S +++ b/trunk/arch/arm64/kernel/vdso/gettimeofday.S @@ -73,6 +73,8 @@ ENTRY(__kernel_gettimeofday) /* If tz is NULL, return 0. 
*/ cbz x1, 3f ldp w4, w5, [vdso_data, #VDSO_TZ_MINWEST] + seqcnt_read w9 + seqcnt_check w9, 1b stp w4, w5, [x1, #TZ_MINWEST] 3: mov x0, xzr diff --git a/trunk/arch/avr32/include/asm/dma-mapping.h b/trunk/arch/avr32/include/asm/dma-mapping.h index b3d18f9f3e8d..aaf5199d8fcb 100644 --- a/trunk/arch/avr32/include/asm/dma-mapping.h +++ b/trunk/arch/avr32/include/asm/dma-mapping.h @@ -336,14 +336,4 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) -/* drivers/base/dma-mapping.c */ -extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); -extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size); - -#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) -#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) - #endif /* __ASM_AVR32_DMA_MAPPING_H */ diff --git a/trunk/arch/blackfin/include/asm/dma-mapping.h b/trunk/arch/blackfin/include/asm/dma-mapping.h index 054d9ec57d9d..bbf461076a0a 100644 --- a/trunk/arch/blackfin/include/asm/dma-mapping.h +++ b/trunk/arch/blackfin/include/asm/dma-mapping.h @@ -154,14 +154,4 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, _dma_sync((dma_addr_t)vaddr, size, dir); } -/* drivers/base/dma-mapping.c */ -extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); -extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size); - -#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) -#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) - #endif /* _BLACKFIN_DMA_MAPPING_H */ diff --git a/trunk/arch/c6x/include/asm/dma-mapping.h b/trunk/arch/c6x/include/asm/dma-mapping.h index 88bd0d899bdb..3c694065030f 100644 --- a/trunk/arch/c6x/include/asm/dma-mapping.h +++ b/trunk/arch/c6x/include/asm/dma-mapping.h @@ -89,19 +89,4 @@ extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent((d), (s), (h), (f)) #define dma_free_noncoherent(d, s, v, h) dma_free_coherent((d), (s), (v), (h)) -/* Not supported for now */ -static inline int dma_mmap_coherent(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size) -{ - return -EINVAL; -} - -static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size) -{ - return -EINVAL; -} - #endif /* _ASM_C6X_DMA_MAPPING_H */ diff --git a/trunk/arch/cris/include/asm/dma-mapping.h b/trunk/arch/cris/include/asm/dma-mapping.h index 2f0f654f1b44..8588b2ccf854 100644 --- a/trunk/arch/cris/include/asm/dma-mapping.h +++ b/trunk/arch/cris/include/asm/dma-mapping.h @@ -158,15 +158,5 @@ dma_cache_sync(struct device *dev, void *vaddr, size_t size, { } -/* drivers/base/dma-mapping.c */ -extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); -extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size); - -#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) -#define dma_get_sgtable(d, 
t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) - #endif diff --git a/trunk/arch/frv/include/asm/dma-mapping.h b/trunk/arch/frv/include/asm/dma-mapping.h index 1746a2b8e6e7..dfb811002c64 100644 --- a/trunk/arch/frv/include/asm/dma-mapping.h +++ b/trunk/arch/frv/include/asm/dma-mapping.h @@ -132,19 +132,4 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size, flush_write_buffers(); } -/* Not supported for now */ -static inline int dma_mmap_coherent(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size) -{ - return -EINVAL; -} - -static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size) -{ - return -EINVAL; -} - #endif /* _ASM_DMA_MAPPING_H */ diff --git a/trunk/arch/ia64/kernel/ptrace.c b/trunk/arch/ia64/kernel/ptrace.c index b7a5fffe0924..4265ff64219b 100644 --- a/trunk/arch/ia64/kernel/ptrace.c +++ b/trunk/arch/ia64/kernel/ptrace.c @@ -672,6 +672,33 @@ ptrace_attach_sync_user_rbs (struct task_struct *child) read_unlock(&tasklist_lock); } +static inline int +thread_matches (struct task_struct *thread, unsigned long addr) +{ + unsigned long thread_rbs_end; + struct pt_regs *thread_regs; + + if (ptrace_check_attach(thread, 0) < 0) + /* + * If the thread is not in an attachable state, we'll + * ignore it. The net effect is that if ADDR happens + * to overlap with the portion of the thread's + * register backing store that is currently residing + * on the thread's kernel stack, then ptrace() may end + * up accessing a stale value. But if the thread + * isn't stopped, that's a problem anyhow, so we're + * doing as well as we can... + */ + return 0; + + thread_regs = task_pt_regs(thread); + thread_rbs_end = ia64_get_user_rbs_end(thread, thread_regs, NULL); + if (!on_kernel_rbs(addr, thread_regs->ar_bspstore, thread_rbs_end)) + return 0; + + return 1; /* looks like we've got a winner */ +} + /* * Write f32-f127 back to task->thread.fph if it has been modified. 
*/ diff --git a/trunk/arch/m68k/include/asm/dma-mapping.h b/trunk/arch/m68k/include/asm/dma-mapping.h index 292805f0762e..17f7a45948ea 100644 --- a/trunk/arch/m68k/include/asm/dma-mapping.h +++ b/trunk/arch/m68k/include/asm/dma-mapping.h @@ -21,22 +21,6 @@ extern void *dma_alloc_coherent(struct device *, size_t, extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); -static inline void *dma_alloc_attrs(struct device *dev, size_t size, - dma_addr_t *dma_handle, gfp_t flag, - struct dma_attrs *attrs) -{ - /* attrs is not supported and ignored */ - return dma_alloc_coherent(dev, size, dma_handle, flag); -} - -static inline void dma_free_attrs(struct device *dev, size_t size, - void *cpu_addr, dma_addr_t dma_handle, - struct dma_attrs *attrs) -{ - /* attrs is not supported and ignored */ - dma_free_coherent(dev, size, cpu_addr, dma_handle); -} - static inline void *dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t flag) { @@ -115,14 +99,4 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t handle) #include #endif -/* drivers/base/dma-mapping.c */ -extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, - void *cpu_addr, dma_addr_t dma_addr, size_t size); -extern int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size); - -#define dma_mmap_coherent(d, v, c, h, s) dma_common_mmap(d, v, c, h, s) -#define dma_get_sgtable(d, t, v, h, s) dma_common_get_sgtable(d, t, v, h, s) - #endif /* _M68K_DMA_MAPPING_H */ diff --git a/trunk/arch/m68k/include/asm/pgtable_no.h b/trunk/arch/m68k/include/asm/pgtable_no.h index 037028f4ab70..bf86b29fe64a 100644 --- a/trunk/arch/m68k/include/asm/pgtable_no.h +++ b/trunk/arch/m68k/include/asm/pgtable_no.h @@ -64,8 +64,6 @@ extern unsigned int kobjsize(const void *objp); */ #define VMALLOC_START 0 #define VMALLOC_END 0xffffffff -#define KMAP_START 0 -#define KMAP_END 0xffffffff #include diff --git a/trunk/arch/m68k/include/asm/unistd.h b/trunk/arch/m68k/include/asm/unistd.h index f9337f614660..847994ce6804 100644 --- a/trunk/arch/m68k/include/asm/unistd.h +++ b/trunk/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include -#define NR_syscalls 349 +#define NR_syscalls 348 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/trunk/arch/m68k/include/uapi/asm/unistd.h b/trunk/arch/m68k/include/uapi/asm/unistd.h index 625f321001dc..b94bfbf90705 100644 --- a/trunk/arch/m68k/include/uapi/asm/unistd.h +++ b/trunk/arch/m68k/include/uapi/asm/unistd.h @@ -353,6 +353,5 @@ #define __NR_process_vm_readv 345 #define __NR_process_vm_writev 346 #define __NR_kcmp 347 -#define __NR_finit_module 348 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/trunk/arch/m68k/kernel/syscalltable.S b/trunk/arch/m68k/kernel/syscalltable.S index 3f04ea0ab802..c30da5b3f2db 100644 --- a/trunk/arch/m68k/kernel/syscalltable.S +++ b/trunk/arch/m68k/kernel/syscalltable.S @@ -368,5 +368,4 @@ ENTRY(sys_call_table) .long sys_process_vm_readv /* 345 */ .long sys_process_vm_writev .long sys_kcmp - .long sys_finit_module diff --git a/trunk/arch/m68k/mm/init.c b/trunk/arch/m68k/mm/init.c index afd8106fd83b..f0e05bce92f2 100644 --- a/trunk/arch/m68k/mm/init.c +++ b/trunk/arch/m68k/mm/init.c @@ -39,11 +39,6 @@ void *empty_zero_page; EXPORT_SYMBOL(empty_zero_page); -#if !defined(CONFIG_SUN3) && !defined(CONFIG_COLDFIRE) -extern void init_pointer_table(unsigned long ptable); -extern pmd_t *zero_pgtable; -#endif - #ifdef CONFIG_MMU pg_data_t 
pg_data_map[MAX_NUMNODES]; @@ -74,6 +69,9 @@ void __init m68k_setup_node(int node) node_set_online(node); } +extern void init_pointer_table(unsigned long ptable); +extern pmd_t *zero_pgtable; + #else /* CONFIG_MMU */ /* diff --git a/trunk/arch/mips/bcm47xx/Kconfig b/trunk/arch/mips/bcm47xx/Kconfig index ba611927749b..d7af29f1fcf0 100644 --- a/trunk/arch/mips/bcm47xx/Kconfig +++ b/trunk/arch/mips/bcm47xx/Kconfig @@ -8,10 +8,8 @@ config BCM47XX_SSB select SSB_DRIVER_EXTIF select SSB_EMBEDDED select SSB_B43_PCI_BRIDGE if PCI - select SSB_DRIVER_PCICORE if PCI select SSB_PCICORE_HOSTMODE if PCI select SSB_DRIVER_GPIO - select GPIOLIB default y help Add support for old Broadcom BCM47xx boards with Sonics Silicon Backplane support. @@ -27,7 +25,6 @@ config BCM47XX_BCMA select BCMA_HOST_PCI if PCI select BCMA_DRIVER_PCI_HOSTMODE if PCI select BCMA_DRIVER_GPIO - select GPIOLIB default y help Add support for new Broadcom BCM47xx boards with Broadcom specific Advanced Microcontroller Bus. diff --git a/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c b/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c index 33b72144db31..9f883bf76953 100644 --- a/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c +++ b/trunk/arch/mips/cavium-octeon/executive/cvmx-l2c.c @@ -30,7 +30,6 @@ * measurement, and debugging facilities. */ -#include #include #include #include @@ -286,22 +285,22 @@ uint64_t cvmx_l2c_read_perf(uint32_t counter) */ static void fault_in(uint64_t addr, int len) { - char *ptr; - + volatile char *ptr; + volatile char dummy; /* * Adjust addr and length so we get all cache lines even for * small ranges spanning two cache lines. */ len += addr & CVMX_CACHE_LINE_MASK; addr &= ~CVMX_CACHE_LINE_MASK; - ptr = cvmx_phys_to_ptr(addr); + ptr = (volatile char *)cvmx_phys_to_ptr(addr); /* * Invalidate L1 cache to make sure all loads result in data * being in L2. 
*/ CVMX_DCACHE_INVALIDATE; while (len > 0) { - ACCESS_ONCE(*ptr); + dummy += *ptr; len -= CVMX_CACHE_LINE_SIZE; ptr += CVMX_CACHE_LINE_SIZE; } diff --git a/trunk/arch/mips/include/uapi/asm/break.h b/trunk/arch/mips/include/asm/break.h similarity index 100% rename from trunk/arch/mips/include/uapi/asm/break.h rename to trunk/arch/mips/include/asm/break.h diff --git a/trunk/arch/mips/include/asm/dsp.h b/trunk/arch/mips/include/asm/dsp.h index 7bfad0520e25..e9bfc0813c72 100644 --- a/trunk/arch/mips/include/asm/dsp.h +++ b/trunk/arch/mips/include/asm/dsp.h @@ -16,7 +16,7 @@ #include #define DSP_DEFAULT 0x00000000 -#define DSP_MASK 0x3f +#define DSP_MASK 0x3ff #define __enable_dsp_hazard() \ do { \ diff --git a/trunk/arch/mips/include/asm/inst.h b/trunk/arch/mips/include/asm/inst.h index 33c34adbecfa..ab84064283db 100644 --- a/trunk/arch/mips/include/asm/inst.h +++ b/trunk/arch/mips/include/asm/inst.h @@ -353,7 +353,6 @@ union mips_instruction { struct u_format u_format; struct c_format c_format; struct r_format r_format; - struct p_format p_format; struct f_format f_format; struct ma_format ma_format; struct b_format b_format; diff --git a/trunk/arch/mips/include/asm/mach-pnx833x/war.h b/trunk/arch/mips/include/asm/mach-pnx833x/war.h index e410df4e1b3a..edaa06d9d492 100644 --- a/trunk/arch/mips/include/asm/mach-pnx833x/war.h +++ b/trunk/arch/mips/include/asm/mach-pnx833x/war.h @@ -21,4 +21,4 @@ #define R10000_LLSC_WAR 0 #define MIPS34K_MISSED_ITLB_WAR 0 -#endif /* __ASM_MIPS_MACH_PNX833X_WAR_H */ +#endif /* __ASM_MIPS_MACH_PNX8550_WAR_H */ diff --git a/trunk/arch/mips/include/asm/pgtable-64.h b/trunk/arch/mips/include/asm/pgtable-64.h index 013d5f781263..c63191055e69 100644 --- a/trunk/arch/mips/include/asm/pgtable-64.h +++ b/trunk/arch/mips/include/asm/pgtable-64.h @@ -230,7 +230,6 @@ static inline void pud_clear(pud_t *pudp) #else #define pte_pfn(x) ((unsigned long)((x).pte >> _PFN_SHIFT)) #define pfn_pte(pfn, prot) __pte(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) -#define pfn_pmd(pfn, prot) __pmd(((pfn) << _PFN_SHIFT) | pgprot_val(prot)) #endif #define __pgd_offset(address) pgd_index(address) diff --git a/trunk/arch/mips/include/uapi/asm/Kbuild b/trunk/arch/mips/include/uapi/asm/Kbuild index 77d4fb33f75a..a1a0452ac185 100644 --- a/trunk/arch/mips/include/uapi/asm/Kbuild +++ b/trunk/arch/mips/include/uapi/asm/Kbuild @@ -3,7 +3,6 @@ include include/uapi/asm-generic/Kbuild.asm header-y += auxvec.h header-y += bitsperlong.h -header-y += break.h header-y += byteorder.h header-y += cachectl.h header-y += errno.h diff --git a/trunk/arch/mips/kernel/ftrace.c b/trunk/arch/mips/kernel/ftrace.c index 83fa1460e294..6a2d758dd8e9 100644 --- a/trunk/arch/mips/kernel/ftrace.c +++ b/trunk/arch/mips/kernel/ftrace.c @@ -25,12 +25,6 @@ #define MCOUNT_OFFSET_INSNS 4 #endif -/* Arch override because MIPS doesn't need to run this from stop_machine() */ -void arch_ftrace_update_code(int command) -{ - ftrace_modify_all_code(command); -} - /* * Check if the address is in kernel space * @@ -95,24 +89,6 @@ static int ftrace_modify_code(unsigned long ip, unsigned int new_code) return 0; } -#ifndef CONFIG_64BIT -static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, - unsigned int new_code2) -{ - int faulted; - - safe_store_code(new_code1, ip, faulted); - if (unlikely(faulted)) - return -EFAULT; - ip += 4; - safe_store_code(new_code2, ip, faulted); - if (unlikely(faulted)) - return -EFAULT; - flush_icache_range(ip, ip + 8); /* original ip + 12 */ - return 0; -} -#endif - /* * The details about the 
calling site of mcount on MIPS * @@ -155,18 +131,8 @@ int ftrace_make_nop(struct module *mod, * needed. */ new = in_kernel_space(ip) ? INSN_NOP : INSN_B_1F; -#ifdef CONFIG_64BIT + return ftrace_modify_code(ip, new); -#else - /* - * On 32 bit MIPS platforms, gcc adds a stack adjust - * instruction in the delay slot after the branch to - * mcount and expects mcount to restore the sp on return. - * This is based on a legacy API and does nothing but - * waste instructions so it's being removed at runtime. - */ - return ftrace_modify_code_2(ip, new, INSN_NOP); -#endif } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) diff --git a/trunk/arch/mips/kernel/mcount.S b/trunk/arch/mips/kernel/mcount.S index 165867673357..4c968e7efb74 100644 --- a/trunk/arch/mips/kernel/mcount.S +++ b/trunk/arch/mips/kernel/mcount.S @@ -46,8 +46,9 @@ PTR_L a5, PT_R9(sp) PTR_L a6, PT_R10(sp) PTR_L a7, PT_R11(sp) -#else PTR_ADDIU sp, PT_SIZE +#else + PTR_ADDIU sp, (PT_SIZE + 8) #endif .endm @@ -68,9 +69,7 @@ NESTED(ftrace_caller, PT_SIZE, ra) .globl _mcount _mcount: b ftrace_stub - addiu sp,sp,8 - - /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ + nop lw t1, function_trace_stop bnez t1, ftrace_stub nop diff --git a/trunk/arch/mips/kernel/vpe.c b/trunk/arch/mips/kernel/vpe.c index 147cec19621d..eec690af6581 100644 --- a/trunk/arch/mips/kernel/vpe.c +++ b/trunk/arch/mips/kernel/vpe.c @@ -705,7 +705,7 @@ static int vpe_run(struct vpe * v) printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", - v->tc->index); + t->index); return -ENOEXEC; } } else { diff --git a/trunk/arch/mips/lantiq/irq.c b/trunk/arch/mips/lantiq/irq.c index a7935bf0fecb..f36acd1b3808 100644 --- a/trunk/arch/mips/lantiq/irq.c +++ b/trunk/arch/mips/lantiq/irq.c @@ -408,7 +408,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) #endif /* tell oprofile which irq to use */ - cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); + cp0_perfcount_irq = LTQ_PERF_IRQ; /* * if the timer irq is not one of the mips irqs we need to diff --git a/trunk/arch/mips/lib/delay.c b/trunk/arch/mips/lib/delay.c index 288f7954988d..dc81ca8dc0dd 100644 --- a/trunk/arch/mips/lib/delay.c +++ b/trunk/arch/mips/lib/delay.c @@ -21,7 +21,7 @@ void __delay(unsigned long loops) " .set noreorder \n" " .align 3 \n" "1: bnez %0, 1b \n" -#if BITS_PER_LONG == 32 +#if __SIZEOF_LONG__ == 4 " subu %0, 1 \n" #else " dsubu %0, 1 \n" diff --git a/trunk/arch/mips/mm/ioremap.c b/trunk/arch/mips/mm/ioremap.c index cacfd31e8ec9..7657fd21cd3f 100644 --- a/trunk/arch/mips/mm/ioremap.c +++ b/trunk/arch/mips/mm/ioremap.c @@ -190,3 +190,9 @@ void __iounmap(const volatile void __iomem *addr) EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(__iounmap); + +int __virt_addr_valid(const volatile void *kaddr) +{ + return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); +} +EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/trunk/arch/mips/mm/mmap.c b/trunk/arch/mips/mm/mmap.c index 7e5fe2790d8a..d9be7540a6be 100644 --- a/trunk/arch/mips/mm/mmap.c +++ b/trunk/arch/mips/mm/mmap.c @@ -192,9 +192,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm) return ret; } - -int __virt_addr_valid(const volatile void *kaddr) -{ - return pfn_valid(PFN_DOWN(virt_to_phys(kaddr))); -} -EXPORT_SYMBOL_GPL(__virt_addr_valid); diff --git a/trunk/arch/mips/netlogic/xlr/setup.c b/trunk/arch/mips/netlogic/xlr/setup.c index c5ce6992ac4c..4e7f49d3d5a8 100644 --- a/trunk/arch/mips/netlogic/xlr/setup.c +++ b/trunk/arch/mips/netlogic/xlr/setup.c @@ -193,11 +193,8 @@ 
static void nlm_init_node(void) void __init prom_init(void) { - int *argv, *envp; /* passed as 32 bit ptrs */ + int i, *argv, *envp; /* passed as 32 bit ptrs */ struct psb_info *prom_infop; -#ifdef CONFIG_SMP - int i; -#endif /* truncate to 32 bit and sign extend all args */ argv = (int *)(long)(int)fw_arg1; diff --git a/trunk/arch/mips/pci/pci-ar71xx.c b/trunk/arch/mips/pci/pci-ar71xx.c index 6eaa4f2d0e38..1552522b8718 100644 --- a/trunk/arch/mips/pci/pci-ar71xx.c +++ b/trunk/arch/mips/pci/pci-ar71xx.c @@ -24,7 +24,7 @@ #include #define AR71XX_PCI_MEM_BASE 0x10000000 -#define AR71XX_PCI_MEM_SIZE 0x07000000 +#define AR71XX_PCI_MEM_SIZE 0x08000000 #define AR71XX_PCI_WIN0_OFFS 0x10000000 #define AR71XX_PCI_WIN1_OFFS 0x11000000 diff --git a/trunk/arch/mips/pci/pci-ar724x.c b/trunk/arch/mips/pci/pci-ar724x.c index c11c75be2d7e..86d77a666458 100644 --- a/trunk/arch/mips/pci/pci-ar724x.c +++ b/trunk/arch/mips/pci/pci-ar724x.c @@ -21,7 +21,7 @@ #define AR724X_PCI_CTRL_SIZE 0x100 #define AR724X_PCI_MEM_BASE 0x10000000 -#define AR724X_PCI_MEM_SIZE 0x04000000 +#define AR724X_PCI_MEM_SIZE 0x08000000 #define AR724X_PCI_REG_RESET 0x18 #define AR724X_PCI_REG_INT_STATUS 0x4c diff --git a/trunk/arch/mn10300/Kconfig b/trunk/arch/mn10300/Kconfig index e70001cfa05b..aa03f2e13385 100644 --- a/trunk/arch/mn10300/Kconfig +++ b/trunk/arch/mn10300/Kconfig @@ -6,7 +6,6 @@ config MN10300 select ARCH_WANT_IPC_PARSE_VERSION select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_KGDB - select GENERIC_ATOMIC64 select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA diff --git a/trunk/arch/mn10300/include/asm/dma-mapping.h b/trunk/arch/mn10300/include/asm/dma-mapping.h index a18abfc558eb..c1be4397b1ed 100644 --- a/trunk/arch/mn10300/include/asm/dma-mapping.h +++ b/trunk/arch/mn10300/include/asm/dma-mapping.h @@ -168,19 +168,4 @@ void dma_cache_sync(void *vaddr, size_t size, mn10300_dcache_flush_inv(); } -/* Not supported for now */ -static inline int dma_mmap_coherent(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size) -{ - return -EINVAL; -} - -static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size) -{ - return -EINVAL; -} - #endif diff --git a/trunk/arch/parisc/include/asm/dma-mapping.h b/trunk/arch/parisc/include/asm/dma-mapping.h index 106b395688e1..467bbd510eac 100644 --- a/trunk/arch/parisc/include/asm/dma-mapping.h +++ b/trunk/arch/parisc/include/asm/dma-mapping.h @@ -238,19 +238,4 @@ void * sba_get_iommu(struct parisc_device *dev); /* At the moment, we panic on error for IOMMU resource exaustion */ #define dma_mapping_error(dev, x) 0 -/* This API cannot be supported on PA-RISC */ -static inline int dma_mmap_coherent(struct device *dev, - struct vm_area_struct *vma, void *cpu_addr, - dma_addr_t dma_addr, size_t size) -{ - return -EINVAL; -} - -static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, - void *cpu_addr, dma_addr_t dma_addr, - size_t size) -{ - return -EINVAL; -} - #endif diff --git a/trunk/arch/parisc/kernel/entry.S b/trunk/arch/parisc/kernel/entry.S index eb7850b46c25..bfb44247d7a7 100644 --- a/trunk/arch/parisc/kernel/entry.S +++ b/trunk/arch/parisc/kernel/entry.S @@ -1865,7 +1865,7 @@ syscall_restore: /* Are we being ptraced? 
*/ ldw TASK_FLAGS(%r1),%r19 - ldi _TIF_SYSCALL_TRACE_MASK,%r2 + ldi (_TIF_SINGLESTEP|_TIF_BLOCKSTEP),%r2 and,COND(=) %r19,%r2,%r0 b,n syscall_restore_rfi @@ -1978,23 +1978,15 @@ syscall_restore_rfi: /* sr2 should be set to zero for userspace syscalls */ STREG %r0,TASK_PT_SR2(%r1) +pt_regs_ok: LDREG TASK_PT_GR31(%r1),%r2 - depi 3,31,2,%r2 /* ensure return to user mode. */ - STREG %r2,TASK_PT_IAOQ0(%r1) + depi 3,31,2,%r2 /* ensure return to user mode. */ + STREG %r2,TASK_PT_IAOQ0(%r1) ldo 4(%r2),%r2 STREG %r2,TASK_PT_IAOQ1(%r1) - b intr_restore copy %r25,%r16 - -pt_regs_ok: - LDREG TASK_PT_IAOQ0(%r1),%r2 - depi 3,31,2,%r2 /* ensure return to user mode. */ - STREG %r2,TASK_PT_IAOQ0(%r1) - LDREG TASK_PT_IAOQ1(%r1),%r2 - depi 3,31,2,%r2 - STREG %r2,TASK_PT_IAOQ1(%r1) b intr_restore - copy %r25,%r16 + nop .import schedule,code syscall_do_resched: diff --git a/trunk/arch/parisc/kernel/irq.c b/trunk/arch/parisc/kernel/irq.c index 0299d63cd112..c0b1affc06a8 100644 --- a/trunk/arch/parisc/kernel/irq.c +++ b/trunk/arch/parisc/kernel/irq.c @@ -410,13 +410,11 @@ void __init init_IRQ(void) { local_irq_disable(); /* PARANOID - should already be disabled */ mtctl(~0UL, 23); /* EIRR : clear all pending external intr */ + claim_cpu_irqs(); #ifdef CONFIG_SMP - if (!cpu_eiem) { - claim_cpu_irqs(); + if (!cpu_eiem) cpu_eiem = EIEM_MASK(IPI_IRQ) | EIEM_MASK(TIMER_IRQ); - } #else - claim_cpu_irqs(); cpu_eiem = EIEM_MASK(TIMER_IRQ); #endif set_eiem(cpu_eiem); /* EIEM : enable all external intr */ diff --git a/trunk/arch/parisc/kernel/ptrace.c b/trunk/arch/parisc/kernel/ptrace.c index 534abd4936e1..857c2f545470 100644 --- a/trunk/arch/parisc/kernel/ptrace.c +++ b/trunk/arch/parisc/kernel/ptrace.c @@ -26,7 +26,7 @@ #include /* PSW bits we allow the debugger to modify */ -#define USER_PSW_BITS (PSW_N | PSW_B | PSW_V | PSW_CB) +#define USER_PSW_BITS (PSW_N | PSW_V | PSW_CB) /* * Called by kernel/ptrace.c when detaching.. diff --git a/trunk/arch/parisc/kernel/signal.c b/trunk/arch/parisc/kernel/signal.c index fd051705a407..537996955998 100644 --- a/trunk/arch/parisc/kernel/signal.c +++ b/trunk/arch/parisc/kernel/signal.c @@ -190,10 +190,8 @@ get_sigframe(struct k_sigaction *ka, unsigned long sp, size_t frame_size) DBG(1,"get_sigframe: ka = %#lx, sp = %#lx, frame_size = %#lx\n", (unsigned long)ka, sp, frame_size); - /* Align alternate stack and reserve 64 bytes for the signal - handler's frame marker. */ if ((ka->sa.sa_flags & SA_ONSTACK) != 0 && ! sas_ss_flags(sp)) - sp = (current->sas_ss_sp + 0x7f) & ~0x3f; /* Stacks grow up! */ + sp = current->sas_ss_sp; /* Stacks grow up! */ DBG(1,"get_sigframe: Returning sp = %#lx\n", (unsigned long)sp); return (void __user *) sp; /* Stacks grow up. Fun. */ diff --git a/trunk/arch/parisc/math-emu/cnv_float.h b/trunk/arch/parisc/math-emu/cnv_float.h index 933423fa5144..9071e093164a 100644 --- a/trunk/arch/parisc/math-emu/cnv_float.h +++ b/trunk/arch/parisc/math-emu/cnv_float.h @@ -347,15 +347,16 @@ Sgl_isinexact_to_fix(sgl_value,exponent) #define Duint_from_sgl_mantissa(sgl_value,exponent,dresultA,dresultB) \ - {unsigned int val = Sall(sgl_value) << SGL_EXP_LENGTH; \ + {Sall(sgl_value) <<= SGL_EXP_LENGTH; /* left-justify */ \ if (exponent <= 31) { \ - Dintp1(dresultA) = 0; \ - Dintp2(dresultB) = val >> (31 - exponent); \ + Dintp1(dresultA) = 0; \ + Dintp2(dresultB) = (unsigned)Sall(sgl_value) >> (31 - exponent); \ } \ else { \ - Dintp1(dresultA) = val >> (63 - exponent); \ - Dintp2(dresultB) = exponent <= 62 ? 
val << (exponent - 31) : 0; \ + Dintp1(dresultA) = Sall(sgl_value) >> (63 - exponent); \ + Dintp2(dresultB) = Sall(sgl_value) << (exponent - 31); \ } \ + Sall(sgl_value) >>= SGL_EXP_LENGTH; /* return to original */ \ } #define Duint_setzero(dresultA,dresultB) \ diff --git a/trunk/arch/powerpc/include/uapi/asm/kvm_para.h b/trunk/arch/powerpc/include/uapi/asm/kvm_para.h index e3af3286a068..ed0e0254b47f 100644 --- a/trunk/arch/powerpc/include/uapi/asm/kvm_para.h +++ b/trunk/arch/powerpc/include/uapi/asm/kvm_para.h @@ -78,7 +78,7 @@ struct kvm_vcpu_arch_shared { #define KVM_HCALL_TOKEN(num) _EV_HCALL_TOKEN(EV_KVM_VENDOR_ID, num) -#include +#include #define KVM_FEATURE_MAGIC_PAGE 1 diff --git a/trunk/arch/powerpc/kernel/entry_32.S b/trunk/arch/powerpc/kernel/entry_32.S index e514de57a125..d22e73e4618b 100644 --- a/trunk/arch/powerpc/kernel/entry_32.S +++ b/trunk/arch/powerpc/kernel/entry_32.S @@ -439,8 +439,6 @@ ret_from_fork: ret_from_kernel_thread: REST_NVGPRS(r1) bl schedule_tail - li r3,0 - stw r3,0(r1) mtlr r14 mr r3,r15 PPC440EP_ERR42 diff --git a/trunk/arch/powerpc/kernel/entry_64.S b/trunk/arch/powerpc/kernel/entry_64.S index 3d990d3bd8ba..b310a0573625 100644 --- a/trunk/arch/powerpc/kernel/entry_64.S +++ b/trunk/arch/powerpc/kernel/entry_64.S @@ -664,19 +664,6 @@ resume_kernel: ld r4,TI_FLAGS(r9) andi. r0,r4,_TIF_NEED_RESCHED bne 1b - - /* - * arch_local_irq_restore() from preempt_schedule_irq above may - * enable hard interrupt but we really should disable interrupts - * when we return from the interrupt, and so that we don't get - * interrupted after loading SRR0/1. - */ -#ifdef CONFIG_PPC_BOOK3E - wrteei 0 -#else - ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */ - mtmsrd r10,1 /* Update machine state */ -#endif /* CONFIG_PPC_BOOK3E */ #endif /* CONFIG_PREEMPT */ .globl fast_exc_return_irq diff --git a/trunk/arch/powerpc/kernel/kgdb.c b/trunk/arch/powerpc/kernel/kgdb.c index a7bc7521c064..c470a40b29f5 100644 --- a/trunk/arch/powerpc/kernel/kgdb.c +++ b/trunk/arch/powerpc/kernel/kgdb.c @@ -154,12 +154,12 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs) static int kgdb_singlestep(struct pt_regs *regs) { struct thread_info *thread_info, *exception_thread_info; - struct thread_info *backup_current_thread_info; + struct thread_info *backup_current_thread_info = \ + (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); if (user_mode(regs)) return 0; - backup_current_thread_info = (struct thread_info *)kmalloc(sizeof(struct thread_info), GFP_KERNEL); /* * On Book E and perhaps other processors, singlestep is handled on * the critical exception stack. This causes current_thread_info() @@ -185,7 +185,6 @@ static int kgdb_singlestep(struct pt_regs *regs) /* Restore current_thread_info lastly. 
*/ memcpy(exception_thread_info, backup_current_thread_info, sizeof *thread_info); - kfree(backup_current_thread_info); return 1; } diff --git a/trunk/arch/powerpc/kernel/time.c b/trunk/arch/powerpc/kernel/time.c index 127361e093f4..6f6b1cccc916 100644 --- a/trunk/arch/powerpc/kernel/time.c +++ b/trunk/arch/powerpc/kernel/time.c @@ -494,15 +494,10 @@ void timer_interrupt(struct pt_regs * regs) set_dec(DECREMENTER_MAX); /* Some implementations of hotplug will get timer interrupts while - * offline, just ignore these and we also need to set - * decrementers_next_tb as MAX to make sure __check_irq_replay - * don't replay timer interrupt when return, otherwise we'll trap - * here infinitely :( + * offline, just ignore these */ - if (!cpu_online(smp_processor_id())) { - *next_tb = ~(u64)0; + if (!cpu_online(smp_processor_id())) return; - } /* Conditionally hard-enable interrupts now that the DEC has been * bumped to its maximum value diff --git a/trunk/arch/powerpc/kvm/book3s_hv_ras.c b/trunk/arch/powerpc/kvm/book3s_hv_ras.c index a353c485808c..35f3cf0269b3 100644 --- a/trunk/arch/powerpc/kvm/book3s_hv_ras.c +++ b/trunk/arch/powerpc/kvm/book3s_hv_ras.c @@ -79,9 +79,7 @@ static void flush_tlb_power7(struct kvm_vcpu *vcpu) static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) { unsigned long srr1 = vcpu->arch.shregs.msr; -#ifdef CONFIG_PPC_POWERNV struct opal_machine_check_event *opal_evt; -#endif long handled = 1; if (srr1 & SRR1_MC_LDSTERR) { @@ -119,7 +117,6 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) handled = 0; } -#ifdef CONFIG_PPC_POWERNV /* * See if OPAL has already handled the condition. * We assume that if the condition is recovered then OPAL @@ -134,7 +131,6 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) if (handled) opal_evt->in_use = 0; -#endif return handled; } diff --git a/trunk/arch/powerpc/kvm/emulate.c b/trunk/arch/powerpc/kvm/emulate.c index 9d9cddc5b346..b0855e5d8905 100644 --- a/trunk/arch/powerpc/kvm/emulate.c +++ b/trunk/arch/powerpc/kvm/emulate.c @@ -39,7 +39,6 @@ #define OP_31_XOP_TRAP 4 #define OP_31_XOP_LWZX 23 #define OP_31_XOP_TRAP_64 68 -#define OP_31_XOP_DCBF 86 #define OP_31_XOP_LBZX 87 #define OP_31_XOP_STWX 151 #define OP_31_XOP_STBX 215 @@ -375,7 +374,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); break; - case OP_31_XOP_DCBF: case OP_31_XOP_DCBI: /* Do nothing. 
The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but diff --git a/trunk/arch/powerpc/mm/hash_low_64.S b/trunk/arch/powerpc/mm/hash_low_64.S index 7443481a315c..56585086413a 100644 --- a/trunk/arch/powerpc/mm/hash_low_64.S +++ b/trunk/arch/powerpc/mm/hash_low_64.S @@ -115,13 +115,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) sldi r29,r5,SID_SHIFT - VPN_SHIFT rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - /* - * Calculate hash value for primary slot and store it in r28 - * r3 = va, r5 = vsid - * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) - */ - rldicl r0,r3,64-12,48 - xor r28,r5,r0 /* hash */ + + /* Calculate hash value for primary slot and store it in r28 */ + rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ + rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ + xor r28,r5,r0 b 4f 3: /* Calc vpn and put it in r29 */ @@ -132,12 +130,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* * calculate hash value for primary slot and * store it in r28 for 1T segment - * r3 = va, r5 = vsid */ - sldi r28,r5,25 /* vsid << 25 */ - /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ - rldicl r0,r3,64-12,36 - xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ + rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ + clrldi r5,r5,40 /* vsid & 0xffffff */ + rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ + xor r28,r28,r5 xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -410,13 +407,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) */ rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - /* - * Calculate hash value for primary slot and store it in r28 - * r3 = va, r5 = vsid - * r0 = (va >> 12) & ((1ul << (28 - 12)) -1) - */ - rldicl r0,r3,64-12,48 - xor r28,r5,r0 /* hash */ + + /* Calculate hash value for primary slot and store it in r28 */ + rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ + rldicl r0,r3,64-12,48 /* (ea >> 12) & 0xffff */ + xor r28,r5,r0 b 4f 3: /* Calc vpn and put it in r29 */ @@ -431,12 +426,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) /* * Calculate hash value for primary slot and * store it in r28 for 1T segment - * r3 = va, r5 = vsid */ - sldi r28,r5,25 /* vsid << 25 */ - /* r0 = (va >> 12) & ((1ul << (40 - 12)) -1) */ - rldicl r0,r3,64-12,36 - xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ + rldic r28,r5,25,25 /* (vsid << 25) & 0x7fffffffff */ + clrldi r5,r5,40 /* vsid & 0xffffff */ + rldicl r0,r3,64-12,36 /* (ea >> 12) & 0xfffffff */ + xor r28,r28,r5 xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ @@ -758,27 +752,25 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT - VPN_SHIFT) or r29,r28,r29 - /* Calculate hash value for primary slot and store it in r28 - * r3 = va, r5 = vsid - * r0 = (va >> 16) & ((1ul << (28 - 16)) -1) - */ - rldicl r0,r3,64-16,52 - xor r28,r5,r0 /* hash */ + /* Calculate hash value for primary slot and store it in r28 */ + rldicl r5,r5,0,25 /* vsid & 0x0000007fffffffff */ + rldicl r0,r3,64-16,52 /* (ea >> 16) & 0xfff */ + xor r28,r5,r0 b 4f 3: /* Calc vpn and put it in r29 */ sldi r29,r5,SID_SHIFT_1T - VPN_SHIFT rldicl r28,r3,64 - VPN_SHIFT,64 - (SID_SHIFT_1T - VPN_SHIFT) or r29,r28,r29 + /* * calculate hash value for primary slot and * store it in r28 for 1T segment - * r3 = va, r5 = vsid */ - sldi r28,r5,25 /* vsid << 25 */ - /* r0 = (va >> 16) & ((1ul << (40 - 16)) -1) */ - rldicl r0,r3,64-16,40 - xor r28,r28,r5 /* vsid ^ ( vsid << 25) */ + rldic r28,r5,25,25 /* (vsid << 25) & 
0x7fffffffff */ + clrldi r5,r5,40 /* vsid & 0xffffff */ + rldicl r0,r3,64-16,40 /* (ea >> 16) & 0xffffff */ + xor r28,r28,r5 xor r28,r28,r0 /* hash */ /* Convert linux PTE bits into HW equivalents */ diff --git a/trunk/arch/powerpc/oprofile/op_model_power4.c b/trunk/arch/powerpc/oprofile/op_model_power4.c index f444b94935f5..315f9495e9b2 100644 --- a/trunk/arch/powerpc/oprofile/op_model_power4.c +++ b/trunk/arch/powerpc/oprofile/op_model_power4.c @@ -52,7 +52,7 @@ static int power7_marked_instr_event(u64 mmcr1) for (pmc = 0; pmc < 4; pmc++) { psel = mmcr1 & (OPROFILE_PM_PMCSEL_MSK << (OPROFILE_MAX_PMC_NUM - pmc) - * OPROFILE_PMSEL_FIELD_WIDTH); + * OPROFILE_MAX_PMC_NUM); psel = (psel >> ((OPROFILE_MAX_PMC_NUM - pmc) * OPROFILE_PMSEL_FIELD_WIDTH)) & ~1ULL; unit = mmcr1 & (OPROFILE_PM_UNIT_MSK diff --git a/trunk/arch/powerpc/platforms/pasemi/cpufreq.c b/trunk/arch/powerpc/platforms/pasemi/cpufreq.c index 890f30e70f98..95d00173029f 100644 --- a/trunk/arch/powerpc/platforms/pasemi/cpufreq.c +++ b/trunk/arch/powerpc/platforms/pasemi/cpufreq.c @@ -236,13 +236,6 @@ static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) { - /* - * We don't support CPU hotplug. Don't unmap after the system - * has already made it to a running state. - */ - if (system_state != SYSTEM_BOOTING) - return 0; - if (sdcasr_mapbase) iounmap(sdcasr_mapbase); if (sdcpwr_mapbase) diff --git a/trunk/arch/s390/Makefile b/trunk/arch/s390/Makefile index 7e3ce78d4290..4b8e08b56f49 100644 --- a/trunk/arch/s390/Makefile +++ b/trunk/arch/s390/Makefile @@ -24,8 +24,8 @@ CHECKFLAGS += -D__s390__ -msize-long else LD_BFD := elf64-s390 LDFLAGS := -m elf64_s390 -KBUILD_AFLAGS_MODULE += -fPIC -KBUILD_CFLAGS_MODULE += -fPIC +KBUILD_AFLAGS_MODULE += -fpic -D__PIC__ +KBUILD_CFLAGS_MODULE += -fpic -D__PIC__ KBUILD_CFLAGS += -m64 KBUILD_AFLAGS += -m64 UTS_MACHINE := s390x diff --git a/trunk/arch/s390/include/asm/dma.h b/trunk/arch/s390/include/asm/dma.h index bb9bdcd20864..de015d85e3e5 100644 --- a/trunk/arch/s390/include/asm/dma.h +++ b/trunk/arch/s390/include/asm/dma.h @@ -10,10 +10,4 @@ */ #define MAX_DMA_ADDRESS 0x80000000 -#ifdef CONFIG_PCI -extern int isa_dma_bridge_buggy; -#else -#define isa_dma_bridge_buggy (0) -#endif - #endif /* _ASM_S390_DMA_H */ diff --git a/trunk/arch/s390/include/asm/io.h b/trunk/arch/s390/include/asm/io.h index 27cb32185ce1..16c3eb164f4f 100644 --- a/trunk/arch/s390/include/asm/io.h +++ b/trunk/arch/s390/include/asm/io.h @@ -85,11 +85,6 @@ static inline void iounmap(volatile void __iomem *addr) #define __raw_writel zpci_write_u32 #define __raw_writeq zpci_write_u64 -#define readb_relaxed readb -#define readw_relaxed readw -#define readl_relaxed readl -#define readq_relaxed readq - #endif /* CONFIG_PCI */ #include diff --git a/trunk/arch/s390/include/asm/irq.h b/trunk/arch/s390/include/asm/irq.h index 7def77302d63..e6972f85d2b0 100644 --- a/trunk/arch/s390/include/asm/irq.h +++ b/trunk/arch/s390/include/asm/irq.h @@ -2,61 +2,43 @@ #define _ASM_IRQ_H #include -#include -#include #include -enum interruption_main_class { +enum interruption_class { EXTERNAL_INTERRUPT, IO_INTERRUPT, - NR_IRQS -}; - -enum interruption_class { - IRQEXT_CLK, - IRQEXT_EXC, - IRQEXT_EMS, - IRQEXT_TMR, - IRQEXT_TLA, - IRQEXT_PFL, - IRQEXT_DSD, - IRQEXT_VRT, - IRQEXT_SCP, - IRQEXT_IUC, - IRQEXT_CMS, - IRQEXT_CMC, - IRQEXT_CMR, - IRQIO_CIO, - IRQIO_QAI, - IRQIO_DAS, - IRQIO_C15, - IRQIO_C70, - IRQIO_TAP, - IRQIO_VMR, - IRQIO_LCS, - IRQIO_CLW, - IRQIO_CTC, - IRQIO_APB, - 
IRQIO_ADM, - IRQIO_CSC, - IRQIO_PCI, - IRQIO_MSI, + EXTINT_CLK, + EXTINT_EXC, + EXTINT_EMS, + EXTINT_TMR, + EXTINT_TLA, + EXTINT_PFL, + EXTINT_DSD, + EXTINT_VRT, + EXTINT_SCP, + EXTINT_IUC, + EXTINT_CMS, + EXTINT_CMC, + EXTINT_CMR, + IOINT_CIO, + IOINT_QAI, + IOINT_DAS, + IOINT_C15, + IOINT_C70, + IOINT_TAP, + IOINT_VMR, + IOINT_LCS, + IOINT_CLW, + IOINT_CTC, + IOINT_APB, + IOINT_ADM, + IOINT_CSC, + IOINT_PCI, + IOINT_MSI, NMI_NMI, - CPU_RST, - NR_ARCH_IRQS + NR_IRQS, }; -struct irq_stat { - unsigned int irqs[NR_ARCH_IRQS]; -}; - -DECLARE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); - -static __always_inline void inc_irq_stat(enum interruption_class irq) -{ - __get_cpu_var(irq_stat).irqs[irq]++; -} - struct ext_code { unsigned short subcode; unsigned short code; diff --git a/trunk/arch/s390/include/asm/pgtable.h b/trunk/arch/s390/include/asm/pgtable.h index 098adbb62660..c928dc1938f2 100644 --- a/trunk/arch/s390/include/asm/pgtable.h +++ b/trunk/arch/s390/include/asm/pgtable.h @@ -1365,18 +1365,6 @@ static inline void pmdp_invalidate(struct vm_area_struct *vma, __pmd_idte(address, pmdp); } -#define __HAVE_ARCH_PMDP_SET_WRPROTECT -static inline void pmdp_set_wrprotect(struct mm_struct *mm, - unsigned long address, pmd_t *pmdp) -{ - pmd_t pmd = *pmdp; - - if (pmd_write(pmd)) { - __pmd_idte(address, pmdp); - set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd)); - } -} - static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot) { pmd_t __pmd; @@ -1399,7 +1387,10 @@ static inline int has_transparent_hugepage(void) static inline unsigned long pmd_pfn(pmd_t pmd) { - return pmd_val(pmd) >> PAGE_SHIFT; + if (pmd_trans_huge(pmd)) + return pmd_val(pmd) >> HPAGE_SHIFT; + else + return pmd_val(pmd) >> PAGE_SHIFT; } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ diff --git a/trunk/arch/s390/include/asm/timex.h b/trunk/arch/s390/include/asm/timex.h index 4c060bb5b8ea..fba4d66788a2 100644 --- a/trunk/arch/s390/include/asm/timex.h +++ b/trunk/arch/s390/include/asm/timex.h @@ -128,32 +128,4 @@ static inline unsigned long long get_clock_monotonic(void) return get_clock_xt() - sched_clock_base_cc; } -/** - * tod_to_ns - convert a TOD format value to nanoseconds - * @todval: to be converted TOD format value - * Returns: number of nanoseconds that correspond to the TOD format value - * - * Converting a 64 Bit TOD format value to nanoseconds means that the value - * must be divided by 4.096. In order to achieve that we multiply with 125 - * and divide by 512: - * - * ns = (todval * 125) >> 9; - * - * In order to avoid an overflow with the multiplication we can rewrite this. 
- * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) - * we end up with - * - * ns = ((2^32 * th + tl) * 125 ) >> 9; - * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); - * - */ -static inline unsigned long long tod_to_ns(unsigned long long todval) -{ - unsigned long long ns; - - ns = ((todval >> 32) << 23) * 125; - ns += ((todval & 0xffffffff) * 125) >> 9; - return ns; -} - #endif diff --git a/trunk/arch/s390/include/uapi/asm/unistd.h b/trunk/arch/s390/include/uapi/asm/unistd.h index 864f693c237f..63e6078699f1 100644 --- a/trunk/arch/s390/include/uapi/asm/unistd.h +++ b/trunk/arch/s390/include/uapi/asm/unistd.h @@ -279,8 +279,7 @@ #define __NR_process_vm_writev 341 #define __NR_s390_runtime_instr 342 #define __NR_kcmp 343 -#define __NR_finit_module 344 -#define NR_syscalls 345 +#define NR_syscalls 344 /* * There are some system calls that are not present on 64 bit, some diff --git a/trunk/arch/s390/kernel/compat_wrapper.S b/trunk/arch/s390/kernel/compat_wrapper.S index 9b9a805656b5..827e094a2f49 100644 --- a/trunk/arch/s390/kernel/compat_wrapper.S +++ b/trunk/arch/s390/kernel/compat_wrapper.S @@ -1659,9 +1659,3 @@ ENTRY(sys_kcmp_wrapper) llgfr %r5,%r5 # unsigned long llgfr %r6,%r6 # unsigned long jg sys_kcmp - -ENTRY(sys_finit_module_wrapper) - lgfr %r2,%r2 # int - llgtr %r3,%r3 # const char __user * - lgfr %r4,%r4 # int - jg sys_finit_module diff --git a/trunk/arch/s390/kernel/debug.c b/trunk/arch/s390/kernel/debug.c index 4e8215e0d4b6..ba500d8dc392 100644 --- a/trunk/arch/s390/kernel/debug.c +++ b/trunk/arch/s390/kernel/debug.c @@ -1127,14 +1127,13 @@ debug_register_view(debug_info_t * id, struct debug_view *view) if (i == DEBUG_MAX_VIEWS) { pr_err("Registering view %s/%s would exceed the maximum " "number of views %i\n", id->name, view->name, i); + debugfs_remove(pde); rc = -1; } else { id->views[i] = view; id->debugfs_entries[i] = pde; } spin_unlock_irqrestore(&id->lock, flags); - if (rc) - debugfs_remove(pde); out: return rc; } @@ -1147,9 +1146,9 @@ EXPORT_SYMBOL(debug_register_view); int debug_unregister_view(debug_info_t * id, struct debug_view *view) { - struct dentry *dentry = NULL; + int rc = 0; + int i; unsigned long flags; - int i, rc = 0; if (!id) goto out; @@ -1161,12 +1160,10 @@ debug_unregister_view(debug_info_t * id, struct debug_view *view) if (i == DEBUG_MAX_VIEWS) rc = -1; else { - dentry = id->debugfs_entries[i]; + debugfs_remove(id->debugfs_entries[i]); id->views[i] = NULL; - id->debugfs_entries[i] = NULL; } spin_unlock_irqrestore(&id->lock, flags); - debugfs_remove(dentry); out: return rc; } diff --git a/trunk/arch/s390/kernel/irq.c b/trunk/arch/s390/kernel/irq.c index 9df824ea1667..bf24293970ce 100644 --- a/trunk/arch/s390/kernel/irq.c +++ b/trunk/arch/s390/kernel/irq.c @@ -24,65 +24,43 @@ #include #include "entry.h" -DEFINE_PER_CPU_SHARED_ALIGNED(struct irq_stat, irq_stat); -EXPORT_PER_CPU_SYMBOL_GPL(irq_stat); - struct irq_class { char *name; char *desc; }; -/* - * The list of "main" irq classes on s390. This is the list of interrrupts - * that appear both in /proc/stat ("intr" line) and /proc/interrupts. - * Historically only external and I/O interrupts have been part of /proc/stat. - * We can't add the split external and I/O sub classes since the first field - * in the "intr" line in /proc/stat is supposed to be the sum of all other - * fields. - * Since the external and I/O interrupt fields are already sums we would end - * up with having a sum which accounts each interrupt twice. 
- */ -static const struct irq_class irqclass_main_desc[NR_IRQS] = { +static const struct irq_class intrclass_names[] = { [EXTERNAL_INTERRUPT] = {.name = "EXT"}, - [IO_INTERRUPT] = {.name = "I/O"} -}; - -/* - * The list of split external and I/O interrupts that appear only in - * /proc/interrupts. - * In addition this list contains non external / I/O events like NMIs. - */ -static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { - [IRQEXT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"}, - [IRQEXT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"}, - [IRQEXT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"}, - [IRQEXT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"}, - [IRQEXT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"}, - [IRQEXT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"}, - [IRQEXT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"}, - [IRQEXT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"}, - [IRQEXT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"}, - [IRQEXT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"}, - [IRQEXT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, - [IRQEXT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, - [IRQEXT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, - [IRQIO_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, - [IRQIO_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, - [IRQIO_DAS] = {.name = "DAS", .desc = "[I/O] DASD"}, - [IRQIO_C15] = {.name = "C15", .desc = "[I/O] 3215"}, - [IRQIO_C70] = {.name = "C70", .desc = "[I/O] 3270"}, - [IRQIO_TAP] = {.name = "TAP", .desc = "[I/O] Tape"}, - [IRQIO_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record Devices"}, - [IRQIO_LCS] = {.name = "LCS", .desc = "[I/O] LCS"}, - [IRQIO_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"}, - [IRQIO_CTC] = {.name = "CTC", .desc = "[I/O] CTC"}, - [IRQIO_APB] = {.name = "APB", .desc = "[I/O] AP Bus"}, - [IRQIO_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"}, - [IRQIO_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"}, - [IRQIO_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" }, - [IRQIO_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" }, + [IO_INTERRUPT] = {.name = "I/O"}, + [EXTINT_CLK] = {.name = "CLK", .desc = "[EXT] Clock Comparator"}, + [EXTINT_EXC] = {.name = "EXC", .desc = "[EXT] External Call"}, + [EXTINT_EMS] = {.name = "EMS", .desc = "[EXT] Emergency Signal"}, + [EXTINT_TMR] = {.name = "TMR", .desc = "[EXT] CPU Timer"}, + [EXTINT_TLA] = {.name = "TAL", .desc = "[EXT] Timing Alert"}, + [EXTINT_PFL] = {.name = "PFL", .desc = "[EXT] Pseudo Page Fault"}, + [EXTINT_DSD] = {.name = "DSD", .desc = "[EXT] DASD Diag"}, + [EXTINT_VRT] = {.name = "VRT", .desc = "[EXT] Virtio"}, + [EXTINT_SCP] = {.name = "SCP", .desc = "[EXT] Service Call"}, + [EXTINT_IUC] = {.name = "IUC", .desc = "[EXT] IUCV"}, + [EXTINT_CMS] = {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"}, + [EXTINT_CMC] = {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"}, + [EXTINT_CMR] = {.name = "CMR", .desc = "[EXT] CPU-Measurement: RI"}, + [IOINT_CIO] = {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"}, + [IOINT_QAI] = {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"}, + [IOINT_DAS] = {.name = "DAS", .desc = "[I/O] DASD"}, + [IOINT_C15] = {.name = "C15", .desc = "[I/O] 3215"}, + [IOINT_C70] = {.name = "C70", .desc = "[I/O] 3270"}, + [IOINT_TAP] = {.name = "TAP", .desc = "[I/O] Tape"}, + [IOINT_VMR] = {.name = "VMR", .desc = "[I/O] Unit Record 
Devices"}, + [IOINT_LCS] = {.name = "LCS", .desc = "[I/O] LCS"}, + [IOINT_CLW] = {.name = "CLW", .desc = "[I/O] CLAW"}, + [IOINT_CTC] = {.name = "CTC", .desc = "[I/O] CTC"}, + [IOINT_APB] = {.name = "APB", .desc = "[I/O] AP Bus"}, + [IOINT_ADM] = {.name = "ADM", .desc = "[I/O] EADM Subchannel"}, + [IOINT_CSC] = {.name = "CSC", .desc = "[I/O] CHSC Subchannel"}, + [IOINT_PCI] = {.name = "PCI", .desc = "[I/O] PCI Interrupt" }, + [IOINT_MSI] = {.name = "MSI", .desc = "[I/O] MSI Interrupt" }, [NMI_NMI] = {.name = "NMI", .desc = "[NMI] Machine Check"}, - [CPU_RST] = {.name = "RST", .desc = "[CPU] CPU Restart"}, }; /* @@ -90,34 +68,30 @@ static const struct irq_class irqclass_sub_desc[NR_ARCH_IRQS] = { */ int show_interrupts(struct seq_file *p, void *v) { - int irq = *(loff_t *) v; - int cpu; + int i = *(loff_t *) v, j; get_online_cpus(); - if (irq == 0) { + if (i == 0) { seq_puts(p, " "); - for_each_online_cpu(cpu) - seq_printf(p, "CPU%d ", cpu); + for_each_online_cpu(j) + seq_printf(p, "CPU%d ",j); seq_putc(p, '\n'); } - if (irq < NR_IRQS) { - seq_printf(p, "%s: ", irqclass_main_desc[irq].name); - for_each_online_cpu(cpu) - seq_printf(p, "%10u ", kstat_cpu(cpu).irqs[irq]); - seq_putc(p, '\n'); - goto skip_arch_irqs; - } - for (irq = 0; irq < NR_ARCH_IRQS; irq++) { - seq_printf(p, "%s: ", irqclass_sub_desc[irq].name); - for_each_online_cpu(cpu) - seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).irqs[irq]); - if (irqclass_sub_desc[irq].desc) - seq_printf(p, " %s", irqclass_sub_desc[irq].desc); - seq_putc(p, '\n'); - } -skip_arch_irqs: + + if (i < NR_IRQS) { + seq_printf(p, "%s: ", intrclass_names[i].name); +#ifndef CONFIG_SMP + seq_printf(p, "%10u ", kstat_irqs(i)); +#else + for_each_online_cpu(j) + seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]); +#endif + if (intrclass_names[i].desc) + seq_printf(p, " %s", intrclass_names[i].desc); + seq_putc(p, '\n'); + } put_online_cpus(); - return 0; + return 0; } /* @@ -248,7 +222,7 @@ void __irq_entry do_extint(struct pt_regs *regs, struct ext_code ext_code, /* Serve timer interrupts first. 
*/ clock_comparator_work(); } - kstat_incr_irqs_this_cpu(EXTERNAL_INTERRUPT, NULL); + kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; if (ext_code.code != 0x1004) __get_cpu_var(s390_idle).nohz_delay = 1; diff --git a/trunk/arch/s390/kernel/nmi.c b/trunk/arch/s390/kernel/nmi.c index 7918fbea36bb..a6daa5c5cdb0 100644 --- a/trunk/arch/s390/kernel/nmi.c +++ b/trunk/arch/s390/kernel/nmi.c @@ -254,7 +254,7 @@ void notrace s390_do_machine_check(struct pt_regs *regs) int umode; nmi_enter(); - inc_irq_stat(NMI_NMI); + kstat_cpu(smp_processor_id()).irqs[NMI_NMI]++; mci = (struct mci *) &S390_lowcore.mcck_interruption_code; mcck = &__get_cpu_var(cpu_mcck); umode = user_mode(regs); diff --git a/trunk/arch/s390/kernel/perf_cpum_cf.c b/trunk/arch/s390/kernel/perf_cpum_cf.c index 86ec7447e1f5..c4e7269d4a09 100644 --- a/trunk/arch/s390/kernel/perf_cpum_cf.c +++ b/trunk/arch/s390/kernel/perf_cpum_cf.c @@ -229,7 +229,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code, if (!(alert & CPU_MF_INT_CF_MASK)) return; - inc_irq_stat(IRQEXT_CMC); + kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++; cpuhw = &__get_cpu_var(cpu_hw_events); /* Measurement alerts are shared and might happen when the PMU diff --git a/trunk/arch/s390/kernel/runtime_instr.c b/trunk/arch/s390/kernel/runtime_instr.c index 077a99389b07..61066f6f71a5 100644 --- a/trunk/arch/s390/kernel/runtime_instr.c +++ b/trunk/arch/s390/kernel/runtime_instr.c @@ -71,7 +71,7 @@ static void runtime_instr_int_handler(struct ext_code ext_code, if (!(param32 & CPU_MF_INT_RI_MASK)) return; - inc_irq_stat(IRQEXT_CMR); + kstat_cpu(smp_processor_id()).irqs[EXTINT_CMR]++; if (!current->thread.ri_cb) return; diff --git a/trunk/arch/s390/kernel/setup.c b/trunk/arch/s390/kernel/setup.c index a5360de85ec7..2568590973ad 100644 --- a/trunk/arch/s390/kernel/setup.c +++ b/trunk/arch/s390/kernel/setup.c @@ -16,7 +16,7 @@ #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include -#include +#include #include #include #include @@ -289,7 +289,6 @@ void machine_power_off(void) * Dummy power off function. 
*/ void (*pm_power_off)(void) = machine_power_off; -EXPORT_SYMBOL_GPL(pm_power_off); static int __init early_parse_mem(char *p) { diff --git a/trunk/arch/s390/kernel/smp.c b/trunk/arch/s390/kernel/smp.c index 7433a2f9e5cc..0b45baa55438 100644 --- a/trunk/arch/s390/kernel/smp.c +++ b/trunk/arch/s390/kernel/smp.c @@ -433,9 +433,9 @@ static void do_ext_call_interrupt(struct ext_code ext_code, cpu = smp_processor_id(); if (ext_code.code == 0x1202) - inc_irq_stat(IRQEXT_EXC); + kstat_cpu(cpu).irqs[EXTINT_EXC]++; else - inc_irq_stat(IRQEXT_EMS); + kstat_cpu(cpu).irqs[EXTINT_EMS]++; /* * handle bit signal external calls */ @@ -623,10 +623,9 @@ static struct sclp_cpu_info *smp_get_cpu_info(void) return info; } -static int __cpuinit smp_add_present_cpu(int cpu); +static int smp_add_present_cpu(int cpu); -static int __cpuinit __smp_rescan_cpus(struct sclp_cpu_info *info, - int sysfs_add) +static int __smp_rescan_cpus(struct sclp_cpu_info *info, int sysfs_add) { struct pcpu *pcpu; cpumask_t avail; @@ -709,7 +708,6 @@ static void __cpuinit smp_start_secondary(void *cpuvoid) pfault_init(); notify_cpu_starting(smp_processor_id()); set_cpu_online(smp_processor_id(), true); - inc_irq_stat(CPU_RST); local_irq_enable(); /* cpu_idle will call schedule for us */ cpu_idle(); @@ -987,7 +985,7 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self, return notifier_from_errno(err); } -static int __cpuinit smp_add_present_cpu(int cpu) +static int smp_add_present_cpu(int cpu) { struct cpu *c = &pcpu_devices[cpu].cpu; struct device *s = &c->dev; diff --git a/trunk/arch/s390/kernel/syscalls.S b/trunk/arch/s390/kernel/syscalls.S index 6a6c61f94dd3..48174850f3b0 100644 --- a/trunk/arch/s390/kernel/syscalls.S +++ b/trunk/arch/s390/kernel/syscalls.S @@ -352,4 +352,3 @@ SYSCALL(sys_process_vm_readv,sys_process_vm_readv,compat_sys_process_vm_readv_wr SYSCALL(sys_process_vm_writev,sys_process_vm_writev,compat_sys_process_vm_writev_wrapper) SYSCALL(sys_ni_syscall,sys_s390_runtime_instr,sys_s390_runtime_instr_wrapper) SYSCALL(sys_kcmp,sys_kcmp,sys_kcmp_wrapper) -SYSCALL(sys_finit_module,sys_finit_module,sys_finit_module_wrapper) diff --git a/trunk/arch/s390/kernel/time.c b/trunk/arch/s390/kernel/time.c index a5f4f5a1d24b..7fcd690d42c7 100644 --- a/trunk/arch/s390/kernel/time.c +++ b/trunk/arch/s390/kernel/time.c @@ -63,7 +63,7 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators); */ unsigned long long notrace __kprobes sched_clock(void) { - return tod_to_ns(get_clock_monotonic()); + return (get_clock_monotonic() * 125) >> 9; } /* @@ -168,7 +168,7 @@ static void clock_comparator_interrupt(struct ext_code ext_code, unsigned int param32, unsigned long param64) { - inc_irq_stat(IRQEXT_CLK); + kstat_cpu(smp_processor_id()).irqs[EXTINT_CLK]++; if (S390_lowcore.clock_comparator == -1ULL) set_clock_comparator(S390_lowcore.clock_comparator); } @@ -179,7 +179,7 @@ static void stp_timing_alert(struct stp_irq_parm *); static void timing_alert_interrupt(struct ext_code ext_code, unsigned int param32, unsigned long param64) { - inc_irq_stat(IRQEXT_TLA); + kstat_cpu(smp_processor_id()).irqs[EXTINT_TLA]++; if (param32 & 0x00c40000) etr_timing_alert((struct etr_irq_parm *) ¶m32); if (param32 & 0x00038000) diff --git a/trunk/arch/s390/kernel/topology.c b/trunk/arch/s390/kernel/topology.c index 4b2e3e317004..f1aba87cceb8 100644 --- a/trunk/arch/s390/kernel/topology.c +++ b/trunk/arch/s390/kernel/topology.c @@ -10,7 +10,6 @@ #include #include #include -#include #include #include #include @@ -43,7 +42,6 @@ static struct 
mask_info socket_info; static struct mask_info book_info; struct cpu_topology_s390 cpu_topology[NR_CPUS]; -EXPORT_SYMBOL_GPL(cpu_topology); static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu) { diff --git a/trunk/arch/s390/kvm/interrupt.c b/trunk/arch/s390/kvm/interrupt.c index 82c481ddef76..c30615e605ac 100644 --- a/trunk/arch/s390/kvm/interrupt.c +++ b/trunk/arch/s390/kvm/interrupt.c @@ -408,7 +408,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu) return 0; } - sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now); + sltime = ((vcpu->arch.sie_block->ckc - now)*125)>>9; hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL); VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime); diff --git a/trunk/arch/s390/kvm/kvm-s390.c b/trunk/arch/s390/kvm/kvm-s390.c index f090e819bf71..c9011bfaabbe 100644 --- a/trunk/arch/s390/kvm/kvm-s390.c +++ b/trunk/arch/s390/kvm/kvm-s390.c @@ -613,9 +613,7 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) kvm_s390_deliver_pending_interrupts(vcpu); vcpu->arch.sie_block->icptcode = 0; - preempt_disable(); kvm_guest_enter(); - preempt_enable(); VCPU_EVENT(vcpu, 6, "entering sie flags %x", atomic_read(&vcpu->arch.sie_block->cpuflags)); trace_kvm_s390_sie_enter(vcpu, diff --git a/trunk/arch/s390/mm/fault.c b/trunk/arch/s390/mm/fault.c index 2fb9e63b8fc4..42601d6e166f 100644 --- a/trunk/arch/s390/mm/fault.c +++ b/trunk/arch/s390/mm/fault.c @@ -569,7 +569,7 @@ static void pfault_interrupt(struct ext_code ext_code, subcode = ext_code.subcode; if ((subcode & 0xff00) != __SUBCODE_MASK) return; - inc_irq_stat(IRQEXT_PFL); + kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; /* Get the token (= pid of the affected task). */ pid = sizeof(void *) == 4 ? param32 : param64; rcu_read_lock(); diff --git a/trunk/arch/s390/oprofile/hwsampler.c b/trunk/arch/s390/oprofile/hwsampler.c index b5b2916895e0..0cb385da202c 100644 --- a/trunk/arch/s390/oprofile/hwsampler.c +++ b/trunk/arch/s390/oprofile/hwsampler.c @@ -233,7 +233,7 @@ static void hws_ext_handler(struct ext_code ext_code, if (!(param32 & CPU_MF_INT_SF_MASK)) return; - inc_irq_stat(IRQEXT_CMS); + kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++; atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); if (hws_wq) diff --git a/trunk/arch/s390/pci/pci.c b/trunk/arch/s390/pci/pci.c index 60e0372545d2..ff49427e9941 100644 --- a/trunk/arch/s390/pci/pci.c +++ b/trunk/arch/s390/pci/pci.c @@ -160,6 +160,35 @@ int pci_proc_domain(struct pci_bus *bus) } EXPORT_SYMBOL_GPL(pci_proc_domain); +/* Store PCI function information block */ +static int zpci_store_fib(struct zpci_dev *zdev, u8 *fc) +{ + struct zpci_fib *fib; + u8 status, cc; + + fib = (void *) get_zeroed_page(GFP_KERNEL); + if (!fib) + return -ENOMEM; + + do { + cc = __stpcifc(zdev->fh, 0, fib, &status); + if (cc == 2) { + msleep(ZPCI_INSN_BUSY_DELAY); + memset(fib, 0, PAGE_SIZE); + } + } while (cc == 2); + + if (cc) + pr_err_once("%s: cc: %u status: %u\n", + __func__, cc, status); + + /* Return PCI function controls */ + *fc = fib->fc; + + free_page((unsigned long) fib); + return (cc) ? 
-EIO : 0; +} + /* Modify PCI: Register adapter interruptions */ static int zpci_register_airq(struct zpci_dev *zdev, unsigned int aisb, u64 aibv) @@ -440,7 +469,7 @@ static void zpci_irq_handler(void *dont, void *need) int rescan = 0, max = aisb_max; struct zdev_irq_map *imap; - inc_irq_stat(IRQIO_PCI); + kstat_cpu(smp_processor_id()).irqs[IOINT_PCI]++; sbit = start; scan: @@ -452,7 +481,7 @@ static void zpci_irq_handler(void *dont, void *need) /* find vector bit */ imap = bucket->imap[sbit]; for_each_set_bit_left(mbit, &imap->aibv, imap->msi_vecs) { - inc_irq_stat(IRQIO_MSI); + kstat_cpu(smp_processor_id()).irqs[IOINT_MSI]++; clear_bit(63 - mbit, &imap->aibv); spin_lock(&imap->lock); diff --git a/trunk/arch/s390/pci/pci_dma.c b/trunk/arch/s390/pci/pci_dma.c index a547419907c3..6138468b420f 100644 --- a/trunk/arch/s390/pci/pci_dma.c +++ b/trunk/arch/s390/pci/pci_dma.c @@ -13,6 +13,8 @@ #include #include +static enum zpci_ioat_dtype zpci_ioat_dt = ZPCI_IOTA_RTTO; + static struct kmem_cache *dma_region_table_cache; static struct kmem_cache *dma_page_table_cache; diff --git a/trunk/arch/sh/boards/mach-ecovec24/setup.c b/trunk/arch/sh/boards/mach-ecovec24/setup.c index a0fa5791cd44..3fede4556c91 100644 --- a/trunk/arch/sh/boards/mach-ecovec24/setup.c +++ b/trunk/arch/sh/boards/mach-ecovec24/setup.c @@ -70,16 +70,6 @@ * OFF-ON : MMC */ -/* - * FSI - DA7210 - * - * it needs amixer settings for playing - * - * amixer set 'HeadPhone' 80 - * amixer set 'Out Mixer Left DAC Left' on - * amixer set 'Out Mixer Right DAC Right' on - */ - /* Heartbeat */ static unsigned char led_pos[] = { 0, 1, 2, 3 }; diff --git a/trunk/arch/sh/include/asm/elf.h b/trunk/arch/sh/include/asm/elf.h index bf9f44f17c29..37924afa8d8a 100644 --- a/trunk/arch/sh/include/asm/elf.h +++ b/trunk/arch/sh/include/asm/elf.h @@ -203,9 +203,9 @@ extern void __kernel_vsyscall; if (vdso_enabled) \ NEW_AUX_ENT(AT_SYSINFO_EHDR, VDSO_BASE); \ else \ - NEW_AUX_ENT(AT_IGNORE, 0) + NEW_AUX_ENT(AT_IGNORE, 0); #else -#define VSYSCALL_AUX_ENT NEW_AUX_ENT(AT_IGNORE, 0) +#define VSYSCALL_AUX_ENT #endif /* CONFIG_VSYSCALL */ #ifdef CONFIG_SH_FPU diff --git a/trunk/arch/sh/include/asm/processor_32.h b/trunk/arch/sh/include/asm/processor_32.h index e699a12cdcca..b1320d55ca30 100644 --- a/trunk/arch/sh/include/asm/processor_32.h +++ b/trunk/arch/sh/include/asm/processor_32.h @@ -39,7 +39,7 @@ /* This decides where the kernel will search for a free chunk of vm * space during mmap's. */ -#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) +#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) /* * Bit of SR register diff --git a/trunk/arch/sh/include/asm/processor_64.h b/trunk/arch/sh/include/asm/processor_64.h index 1cc7d3197143..1ee8946f0952 100644 --- a/trunk/arch/sh/include/asm/processor_64.h +++ b/trunk/arch/sh/include/asm/processor_64.h @@ -47,7 +47,7 @@ pc; }) /* This decides where the kernel will search for a free chunk of vm * space during mmap's. 
*/ -#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) +#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) /* * Bit of SR register diff --git a/trunk/arch/sh/include/uapi/asm/unistd_32.h b/trunk/arch/sh/include/uapi/asm/unistd_32.h index d13a1d623736..9e465f246dc1 100644 --- a/trunk/arch/sh/include/uapi/asm/unistd_32.h +++ b/trunk/arch/sh/include/uapi/asm/unistd_32.h @@ -379,8 +379,7 @@ #define __NR_process_vm_readv 365 #define __NR_process_vm_writev 366 #define __NR_kcmp 367 -#define __NR_finit_module 368 -#define NR_syscalls 369 +#define NR_syscalls 368 #endif /* __ASM_SH_UNISTD_32_H */ diff --git a/trunk/arch/sh/include/uapi/asm/unistd_64.h b/trunk/arch/sh/include/uapi/asm/unistd_64.h index e6820c86e8c7..8e3a2edd284e 100644 --- a/trunk/arch/sh/include/uapi/asm/unistd_64.h +++ b/trunk/arch/sh/include/uapi/asm/unistd_64.h @@ -399,8 +399,7 @@ #define __NR_process_vm_readv 376 #define __NR_process_vm_writev 377 #define __NR_kcmp 378 -#define __NR_finit_module 379 -#define NR_syscalls 380 +#define NR_syscalls 379 #endif /* __ASM_SH_UNISTD_64_H */ diff --git a/trunk/arch/sh/kernel/syscalls_32.S b/trunk/arch/sh/kernel/syscalls_32.S index 734234be2f01..fe97ae5e56f1 100644 --- a/trunk/arch/sh/kernel/syscalls_32.S +++ b/trunk/arch/sh/kernel/syscalls_32.S @@ -385,4 +385,3 @@ ENTRY(sys_call_table) .long sys_process_vm_readv /* 365 */ .long sys_process_vm_writev .long sys_kcmp - .long sys_finit_module diff --git a/trunk/arch/sh/kernel/syscalls_64.S b/trunk/arch/sh/kernel/syscalls_64.S index 579fcb9a896b..5c7b1c67bdc1 100644 --- a/trunk/arch/sh/kernel/syscalls_64.S +++ b/trunk/arch/sh/kernel/syscalls_64.S @@ -405,4 +405,3 @@ sys_call_table: .long sys_process_vm_readv .long sys_process_vm_writev .long sys_kcmp - .long sys_finit_module diff --git a/trunk/arch/sh/lib/mcount.S b/trunk/arch/sh/lib/mcount.S index 52aa2011d753..60164e65d665 100644 --- a/trunk/arch/sh/lib/mcount.S +++ b/trunk/arch/sh/lib/mcount.S @@ -294,8 +294,6 @@ stack_panic: .align 2 .L_init_thread_union: .long init_thread_union -.L_ebss: - .long __bss_stop .Lpanic: .long panic .Lpanic_s: diff --git a/trunk/arch/sparc/include/uapi/asm/unistd.h b/trunk/arch/sparc/include/uapi/asm/unistd.h index 62ced589bcf7..cac719d1bc5c 100644 --- a/trunk/arch/sparc/include/uapi/asm/unistd.h +++ b/trunk/arch/sparc/include/uapi/asm/unistd.h @@ -407,9 +407,8 @@ #define __NR_process_vm_writev 339 #define __NR_kern_features 340 #define __NR_kcmp 341 -#define __NR_finit_module 342 -#define NR_syscalls 343 +#define NR_syscalls 342 /* Bitmask values returned from kern_features system call. */ #define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 diff --git a/trunk/arch/sparc/kernel/pci.c b/trunk/arch/sparc/kernel/pci.c index baf4366e2d6a..04bacce76fe6 100644 --- a/trunk/arch/sparc/kernel/pci.c +++ b/trunk/arch/sparc/kernel/pci.c @@ -378,8 +378,7 @@ static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) /* Cook up fake bus resources for SUNW,simba PCI bridges which lack * a proper 'ranges' property. 
*/ -static void apb_fake_ranges(struct pci_dev *dev, - struct pci_bus *bus, +static void apb_fake_ranges(struct pci_dev *dev, struct pci_bus *bus, struct pci_pbm_info *pbm) { struct pci_bus_region region; @@ -404,15 +403,13 @@ static void apb_fake_ranges(struct pci_dev *dev, pcibios_bus_to_resource(dev, res, ®ion); } -static void pci_of_scan_bus(struct pci_pbm_info *pbm, - struct device_node *node, +static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node, struct pci_bus *bus); #define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) static void of_scan_pci_bridge(struct pci_pbm_info *pbm, - struct device_node *node, - struct pci_dev *dev) + struct device_node *node, struct pci_dev *dev) { struct pci_bus *bus; const u32 *busrange, *ranges; @@ -503,8 +500,7 @@ static void of_scan_pci_bridge(struct pci_pbm_info *pbm, pci_of_scan_bus(pbm, node, bus); } -static void pci_of_scan_bus(struct pci_pbm_info *pbm, - struct device_node *node, +static void pci_of_scan_bus(struct pci_pbm_info *pbm, struct device_node *node, struct pci_bus *bus) { struct device_node *child; diff --git a/trunk/arch/sparc/kernel/pci_psycho.c b/trunk/arch/sparc/kernel/pci_psycho.c index c647634ead2b..b85238289717 100644 --- a/trunk/arch/sparc/kernel/pci_psycho.c +++ b/trunk/arch/sparc/kernel/pci_psycho.c @@ -366,8 +366,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) pci_config_write8(addr, 64); } -static void psycho_scan_bus(struct pci_pbm_info *pbm, - struct device *parent) +static void psycho_scan_bus(struct pci_pbm_info *pbm, struct device *parent) { pbm_config_busmastering(pbm); pbm->is_66mhz_capable = 0; diff --git a/trunk/arch/sparc/kernel/pci_sabre.c b/trunk/arch/sparc/kernel/pci_sabre.c index 6f00d27e8dac..531186d7c9ab 100644 --- a/trunk/arch/sparc/kernel/pci_sabre.c +++ b/trunk/arch/sparc/kernel/pci_sabre.c @@ -442,8 +442,7 @@ static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent) sabre_register_error_handlers(pbm); } -static void sabre_pbm_init(struct pci_pbm_info *pbm, - struct platform_device *op) +static void sabre_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op) { psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE); pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR; diff --git a/trunk/arch/sparc/kernel/pci_schizo.c b/trunk/arch/sparc/kernel/pci_schizo.c index 8f76f23dac38..29e888158ae6 100644 --- a/trunk/arch/sparc/kernel/pci_schizo.c +++ b/trunk/arch/sparc/kernel/pci_schizo.c @@ -1306,9 +1306,8 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm) } } -static int schizo_pbm_init(struct pci_pbm_info *pbm, - struct platform_device *op, u32 portid, - int chip_type) +static int schizo_pbm_init(struct pci_pbm_info *pbm, struct platform_device *op, + u32 portid, int chip_type) { const struct linux_prom64_registers *regs; struct device_node *dp = op->dev.of_node; diff --git a/trunk/arch/sparc/kernel/systbls_32.S b/trunk/arch/sparc/kernel/systbls_32.S index 6ac43c36bbbf..5147f574f125 100644 --- a/trunk/arch/sparc/kernel/systbls_32.S +++ b/trunk/arch/sparc/kernel/systbls_32.S @@ -85,4 +85,4 @@ sys_call_table: /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init /*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime /*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev -/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module +/*340*/ .long sys_ni_syscall, sys_kcmp 
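[Illustrative aside, not part of the patch] The unistd.h and syscall-table hunks in this series (s390, sh and sparc alike) all keep the same invariant: NR_syscalls equals the highest assigned __NR_* value plus one, and the dispatch table has exactly one slot per number, so dropping __NR_finit_module also means decrementing NR_syscalls and deleting the trailing table entry. A minimal C sketch of that relationship follows; the demo_* names and the simplified handler signature are assumptions for illustration only (the numbers mirror the sparc values in the hunk above), not the kernel's real plumbing.

    /* Hypothetical stand-ins for the per-arch syscall wiring. */
    #define DEMO_NR_kcmp        341                  /* last assigned number (sparc) */
    #define DEMO_NR_syscalls    (DEMO_NR_kcmp + 1)   /* table size = highest number + 1 */

    typedef long (*demo_syscall_fn)(void);

    static long demo_sys_ni_syscall(void) { return -1; /* stand-in for "not implemented" */ }
    static long demo_sys_kcmp(void)       { return 0; }

    /* One slot per syscall number; unassigned slots fall back to ni_syscall. */
    static demo_syscall_fn demo_call_table[DEMO_NR_syscalls] = {
            [0 ... DEMO_NR_syscalls - 1] = demo_sys_ni_syscall,
            [DEMO_NR_kcmp]               = demo_sys_kcmp,
    };

Removing a syscall under this layout is the three-part change repeated per architecture in the hunks above: drop the __NR_* define, shrink NR_syscalls by one, and delete the final table entry (plus, on s390, its compat wrapper).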
diff --git a/trunk/arch/sparc/kernel/systbls_64.S b/trunk/arch/sparc/kernel/systbls_64.S index 1009ecb92678..cdbd9b817751 100644 --- a/trunk/arch/sparc/kernel/systbls_64.S +++ b/trunk/arch/sparc/kernel/systbls_64.S @@ -86,7 +86,7 @@ sys_call_table32: .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init /*330*/ .word sys32_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev -/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module +/*340*/ .word sys_kern_features, sys_kcmp #endif /* CONFIG_COMPAT */ @@ -164,4 +164,4 @@ sys_call_table: .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init /*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev -/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module +/*340*/ .word sys_kern_features, sys_kcmp diff --git a/trunk/arch/x86/Kconfig b/trunk/arch/x86/Kconfig index 225543bf45a5..79795af59810 100644 --- a/trunk/arch/x86/Kconfig +++ b/trunk/arch/x86/Kconfig @@ -2138,7 +2138,6 @@ config OLPC_XO1_RTC config OLPC_XO1_SCI bool "OLPC XO-1 SCI extras" depends on OLPC && OLPC_XO1_PM - depends on INPUT=y select POWER_SUPPLY select GPIO_CS5535 select MFD_CORE diff --git a/trunk/arch/x86/boot/Makefile b/trunk/arch/x86/boot/Makefile index 379814bc41e3..ccce0ed67dde 100644 --- a/trunk/arch/x86/boot/Makefile +++ b/trunk/arch/x86/boot/Makefile @@ -71,7 +71,7 @@ GCOV_PROFILE := n $(obj)/bzImage: asflags-y := $(SVGA_MODE) quiet_cmd_image = BUILD $@ -cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/zoffset.h > $@ +cmd_image = $(obj)/tools/build $(obj)/setup.bin $(obj)/vmlinux.bin > $@ $(obj)/bzImage: $(obj)/setup.bin $(obj)/vmlinux.bin $(obj)/tools/build FORCE $(call if_changed,image) @@ -92,7 +92,7 @@ targets += voffset.h $(obj)/voffset.h: vmlinux FORCE $(call if_changed,voffset) -sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . \(startup_32\|startup_64\|efi_pe_entry\|efi_stub_entry\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' +sed-zoffset := -e 's/^\([0-9a-fA-F]*\) . 
\(startup_32\|input_data\|_end\|z_.*\)$$/\#define ZO_\2 0x\1/p' quiet_cmd_zoffset = ZOFFSET $@ cmd_zoffset = $(NM) $< | sed -n $(sed-zoffset) > $@ diff --git a/trunk/arch/x86/boot/compressed/eboot.c b/trunk/arch/x86/boot/compressed/eboot.c index f8fa41190c35..b1942e222768 100644 --- a/trunk/arch/x86/boot/compressed/eboot.c +++ b/trunk/arch/x86/boot/compressed/eboot.c @@ -256,10 +256,10 @@ static efi_status_t setup_efi_pci(struct boot_params *params) int i; struct setup_data *data; - data = (struct setup_data *)(unsigned long)params->hdr.setup_data; + data = (struct setup_data *)params->hdr.setup_data; while (data && data->next) - data = (struct setup_data *)(unsigned long)data->next; + data = (struct setup_data *)data->next; status = efi_call_phys5(sys_table->boottime->locate_handle, EFI_LOCATE_BY_PROTOCOL, &pci_proto, @@ -295,18 +295,16 @@ static efi_status_t setup_efi_pci(struct boot_params *params) if (!pci) continue; -#ifdef CONFIG_X86_64 status = efi_call_phys4(pci->attributes, pci, EfiPciIoAttributeOperationGet, 0, &attributes); -#else - status = efi_call_phys5(pci->attributes, pci, - EfiPciIoAttributeOperationGet, 0, 0, - &attributes); -#endif + if (status != EFI_SUCCESS) continue; + if (!attributes & EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM) + continue; + if (!pci->romimage || !pci->romsize) continue; @@ -347,9 +345,9 @@ static efi_status_t setup_efi_pci(struct boot_params *params) memcpy(rom->romdata, pci->romimage, pci->romsize); if (data) - data->next = (unsigned long)rom; + data->next = (uint64_t)rom; else - params->hdr.setup_data = (unsigned long)rom; + params->hdr.setup_data = (uint64_t)rom; data = (struct setup_data *)rom; @@ -434,9 +432,10 @@ static efi_status_t setup_gop(struct screen_info *si, efi_guid_t *proto, * Once we've found a GOP supporting ConOut, * don't bother looking any further. */ - first_gop = gop; if (conout_found) break; + + first_gop = gop; } } diff --git a/trunk/arch/x86/boot/compressed/head_32.S b/trunk/arch/x86/boot/compressed/head_32.S index 1e3184f6072f..aa4aaf1b2380 100644 --- a/trunk/arch/x86/boot/compressed/head_32.S +++ b/trunk/arch/x86/boot/compressed/head_32.S @@ -35,11 +35,11 @@ ENTRY(startup_32) #ifdef CONFIG_EFI_STUB jmp preferred_addr + .balign 0x10 /* * We don't need the return address, so set up the stack so - * efi_main() can find its arguments. + * efi_main() can find its arugments. */ -ENTRY(efi_pe_entry) add $0x4, %esp call make_boot_params @@ -50,10 +50,8 @@ ENTRY(efi_pe_entry) pushl %eax pushl %esi pushl %ecx - sub $0x4, %esp -ENTRY(efi_stub_entry) - add $0x4, %esp + .org 0x30,0x90 call efi_main cmpl $0, %eax movl %eax, %esi diff --git a/trunk/arch/x86/boot/compressed/head_64.S b/trunk/arch/x86/boot/compressed/head_64.S index f5d1aaa0dec8..2c4b171eec33 100644 --- a/trunk/arch/x86/boot/compressed/head_64.S +++ b/trunk/arch/x86/boot/compressed/head_64.S @@ -201,12 +201,12 @@ ENTRY(startup_64) */ #ifdef CONFIG_EFI_STUB /* - * The entry point for the PE/COFF executable is efi_pe_entry, so - * only legacy boot loaders will execute this jmp. + * The entry point for the PE/COFF executable is 0x210, so only + * legacy boot loaders will execute this jmp. 
*/ jmp preferred_addr -ENTRY(efi_pe_entry) + .org 0x210 mov %rcx, %rdi mov %rdx, %rsi pushq %rdi @@ -218,7 +218,7 @@ ENTRY(efi_pe_entry) popq %rsi popq %rdi -ENTRY(efi_stub_entry) + .org 0x230,0x90 call efi_main movq %rax,%rsi cmpq $0,%rax diff --git a/trunk/arch/x86/boot/header.S b/trunk/arch/x86/boot/header.S index 944ce595f767..8c132a625b94 100644 --- a/trunk/arch/x86/boot/header.S +++ b/trunk/arch/x86/boot/header.S @@ -21,7 +21,6 @@ #include #include #include -#include #include "boot.h" #include "voffset.h" #include "zoffset.h" @@ -256,9 +255,6 @@ section_table: # header, from the old boot sector. .section ".header", "a" - .globl sentinel -sentinel: .byte 0xff, 0xff /* Used to detect broken loaders */ - .globl hdr hdr: setup_sects: .byte 0 /* Filled in by build.c */ @@ -283,7 +279,7 @@ _start: # Part 2 of the header, from the old setup.S .ascii "HdrS" # header signature - .word 0x020c # header version number (>= 0x0105) + .word 0x020b # header version number (>= 0x0105) # or else old loadlin-1.5 will fail) .globl realmode_swtch realmode_swtch: .word 0, 0 # default_switch, SETUPSEG @@ -301,7 +297,13 @@ type_of_loader: .byte 0 # 0 means ancient bootloader, newer # flags, unused bits must be zero (RFU) bit within loadflags loadflags: - .byte LOADED_HIGH # The kernel is to be loaded high +LOADED_HIGH = 1 # If set, the kernel is loaded high +CAN_USE_HEAP = 0x80 # If set, the loader also has set + # heap_end_ptr to tell how much + # space behind setup.S can be used for + # heap purposes. + # Only the loader knows what is free + .byte LOADED_HIGH setup_move_size: .word 0x8000 # size to move, when setup is not # loaded at 0x90000. We will move setup @@ -367,23 +369,7 @@ relocatable_kernel: .byte 1 relocatable_kernel: .byte 0 #endif min_alignment: .byte MIN_KERNEL_ALIGN_LG2 # minimum alignment - -xloadflags: -#ifdef CONFIG_X86_64 -# define XLF0 XLF_KERNEL_64 /* 64-bit kernel */ -#else -# define XLF0 0 -#endif -#ifdef CONFIG_EFI_STUB -# ifdef CONFIG_X86_64 -# define XLF23 XLF_EFI_HANDOVER_64 /* 64-bit EFI handover ok */ -# else -# define XLF23 XLF_EFI_HANDOVER_32 /* 32-bit EFI handover ok */ -# endif -#else -# define XLF23 0 -#endif - .word XLF0 | XLF23 +pad3: .word 0 cmdline_size: .long COMMAND_LINE_SIZE-1 #length of the command line, #added with boot protocol @@ -411,13 +397,8 @@ pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr #define INIT_SIZE VO_INIT_SIZE #endif init_size: .long INIT_SIZE # kernel initialization size -handover_offset: -#ifdef CONFIG_EFI_STUB - .long 0x30 # offset to the handover +handover_offset: .long 0x30 # offset to the handover # protocol entry point -#else - .long 0 -#endif # End of setup header ##################################################### diff --git a/trunk/arch/x86/boot/setup.ld b/trunk/arch/x86/boot/setup.ld index 96a6c7563538..03c0683636b6 100644 --- a/trunk/arch/x86/boot/setup.ld +++ b/trunk/arch/x86/boot/setup.ld @@ -13,7 +13,7 @@ SECTIONS .bstext : { *(.bstext) } .bsdata : { *(.bsdata) } - . = 495; + . 
= 497; .header : { *(.header) } .entrytext : { *(.entrytext) } .inittext : { *(.inittext) } diff --git a/trunk/arch/x86/boot/tools/build.c b/trunk/arch/x86/boot/tools/build.c index 94c544650020..4b8e165ee572 100644 --- a/trunk/arch/x86/boot/tools/build.c +++ b/trunk/arch/x86/boot/tools/build.c @@ -52,10 +52,6 @@ int is_big_kernel; #define PECOFF_RELOC_RESERVE 0x20 -unsigned long efi_stub_entry; -unsigned long efi_pe_entry; -unsigned long startup_64; - /*----------------------------------------------------------------------*/ static const u32 crctab32[] = { @@ -136,7 +132,7 @@ static void die(const char * str, ...) static void usage(void) { - die("Usage: build setup system [zoffset.h] [> image]"); + die("Usage: build setup system [> image]"); } #ifdef CONFIG_EFI_STUB @@ -210,54 +206,30 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz) */ put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]); +#ifdef CONFIG_X86_32 /* - * Address of entry point for PE/COFF executable + * Address of entry point. + * + * The EFI stub entry point is +16 bytes from the start of + * the .text section. */ - put_unaligned_le32(text_start + efi_pe_entry, &buf[pe_header + 0x28]); + put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]); +#else + /* + * Address of entry point. startup_32 is at the beginning and + * the 64-bit entry point (startup_64) is always 512 bytes + * after. The EFI stub entry point is 16 bytes after that, as + * the first instruction allows legacy loaders to jump over + * the EFI stub initialisation + */ + put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]); +#endif /* CONFIG_X86_32 */ update_pecoff_section_header(".text", text_start, text_sz); } #endif /* CONFIG_EFI_STUB */ - -/* - * Parse zoffset.h and find the entry points. We could just #include zoffset.h - * but that would mean tools/build would have to be rebuilt every time. It's - * not as if parsing it is hard... 
- */ -#define PARSE_ZOFS(p, sym) do { \ - if (!strncmp(p, "#define ZO_" #sym " ", 11+sizeof(#sym))) \ - sym = strtoul(p + 11 + sizeof(#sym), NULL, 16); \ -} while (0) - -static void parse_zoffset(char *fname) -{ - FILE *file; - char *p; - int c; - - file = fopen(fname, "r"); - if (!file) - die("Unable to open `%s': %m", fname); - c = fread(buf, 1, sizeof(buf) - 1, file); - if (ferror(file)) - die("read-error on `zoffset.h'"); - buf[c] = 0; - - p = (char *)buf; - - while (p && *p) { - PARSE_ZOFS(p, efi_stub_entry); - PARSE_ZOFS(p, efi_pe_entry); - PARSE_ZOFS(p, startup_64); - - p = strchr(p, '\n'); - while (p && (*p == '\r' || *p == '\n')) - p++; - } -} - int main(int argc, char ** argv) { unsigned int i, sz, setup_sectors; @@ -269,19 +241,7 @@ int main(int argc, char ** argv) void *kernel; u32 crc = 0xffffffffUL; - /* Defaults for old kernel */ -#ifdef CONFIG_X86_32 - efi_pe_entry = 0x10; - efi_stub_entry = 0x30; -#else - efi_pe_entry = 0x210; - efi_stub_entry = 0x230; - startup_64 = 0x200; -#endif - - if (argc == 4) - parse_zoffset(argv[3]); - else if (argc != 3) + if (argc != 3) usage(); /* Copy the setup code */ @@ -339,11 +299,6 @@ int main(int argc, char ** argv) #ifdef CONFIG_EFI_STUB update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); - -#ifdef CONFIG_X86_64 /* Yes, this is really how we defined it :( */ - efi_stub_entry -= 0x200; -#endif - put_unaligned_le32(efi_stub_entry, &buf[0x264]); #endif crc = partial_crc32(buf, i, crc); diff --git a/trunk/arch/x86/ia32/ia32entry.S b/trunk/arch/x86/ia32/ia32entry.S index 142c4ceff112..102ff7cb3e41 100644 --- a/trunk/arch/x86/ia32/ia32entry.S +++ b/trunk/arch/x86/ia32/ia32entry.S @@ -207,7 +207,7 @@ sysexit_from_sys_call: testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) jnz ia32_ret_from_sys_call TRACE_IRQS_ON - ENABLE_INTERRUPTS(CLBR_NONE) + sti movl %eax,%esi /* second arg, syscall return value */ cmpl $-MAX_ERRNO,%eax /* is it an error ? 
*/ jbe 1f @@ -217,7 +217,7 @@ sysexit_from_sys_call: call __audit_syscall_exit movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */ movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi - DISABLE_INTERRUPTS(CLBR_NONE) + cli TRACE_IRQS_OFF testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET) jz \exit diff --git a/trunk/arch/x86/include/asm/efi.h b/trunk/arch/x86/include/asm/efi.h index 28677c55113f..6e8fdf5ad113 100644 --- a/trunk/arch/x86/include/asm/efi.h +++ b/trunk/arch/x86/include/asm/efi.h @@ -94,7 +94,6 @@ extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, #endif /* CONFIG_X86_32 */ extern int add_efi_memmap; -extern unsigned long x86_efi_facility; extern void efi_set_executable(efi_memory_desc_t *md, bool executable); extern int efi_memblock_x86_reserve_range(void); extern void efi_call_phys_prelog(void); diff --git a/trunk/arch/x86/include/asm/uv/uv.h b/trunk/arch/x86/include/asm/uv/uv.h index 062921ef34e9..b47c2a82ff15 100644 --- a/trunk/arch/x86/include/asm/uv/uv.h +++ b/trunk/arch/x86/include/asm/uv/uv.h @@ -16,7 +16,7 @@ extern void uv_system_init(void); extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, unsigned long start, - unsigned long end, + unsigned end, unsigned int cpu); #else /* X86_UV */ diff --git a/trunk/arch/x86/include/uapi/asm/bootparam.h b/trunk/arch/x86/include/uapi/asm/bootparam.h index c15ddaf90710..92862cd90201 100644 --- a/trunk/arch/x86/include/uapi/asm/bootparam.h +++ b/trunk/arch/x86/include/uapi/asm/bootparam.h @@ -1,31 +1,6 @@ #ifndef _ASM_X86_BOOTPARAM_H #define _ASM_X86_BOOTPARAM_H -/* setup_data types */ -#define SETUP_NONE 0 -#define SETUP_E820_EXT 1 -#define SETUP_DTB 2 -#define SETUP_PCI 3 - -/* ram_size flags */ -#define RAMDISK_IMAGE_START_MASK 0x07FF -#define RAMDISK_PROMPT_FLAG 0x8000 -#define RAMDISK_LOAD_FLAG 0x4000 - -/* loadflags */ -#define LOADED_HIGH (1<<0) -#define QUIET_FLAG (1<<5) -#define KEEP_SEGMENTS (1<<6) -#define CAN_USE_HEAP (1<<7) - -/* xloadflags */ -#define XLF_KERNEL_64 (1<<0) -#define XLF_CAN_BE_LOADED_ABOVE_4G (1<<1) -#define XLF_EFI_HANDOVER_32 (1<<2) -#define XLF_EFI_HANDOVER_64 (1<<3) - -#ifndef __ASSEMBLY__ - #include #include #include @@ -34,6 +9,12 @@ #include #include