diff --git a/[refs] b/[refs] index 9e400a621829..b9af89d7ec39 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 5f48a29f1721736930b7817b430039c1d348dd05 +refs/heads/master: 0d0ffa94e0ea0aab3dcb75366806d064f05fdc45 diff --git a/trunk/Documentation/hid/hid-sensor.txt b/trunk/Documentation/hid/hid-sensor.txt old mode 100644 new mode 100755 diff --git a/trunk/Documentation/kernel-parameters.txt b/trunk/Documentation/kernel-parameters.txt index 6c723811c0a0..363e348bff9b 100644 --- a/trunk/Documentation/kernel-parameters.txt +++ b/trunk/Documentation/kernel-parameters.txt @@ -2438,7 +2438,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. real-time workloads. It can also improve energy efficiency for asymmetric multiprocessors. - rcu_nocb_poll [KNL,BOOT] + rcu_nocbs_poll [KNL,BOOT] Rather than requiring that offloaded CPUs (specified by rcu_nocbs= above) explicitly awaken the corresponding "rcuoN" kthreads, diff --git a/trunk/Documentation/virtual/kvm/api.txt b/trunk/Documentation/virtual/kvm/api.txt index c25439a58274..a4df5535996b 100644 --- a/trunk/Documentation/virtual/kvm/api.txt +++ b/trunk/Documentation/virtual/kvm/api.txt @@ -293,7 +293,7 @@ kvm_run' (see below). 4.11 KVM_GET_REGS Capability: basic -Architectures: all except ARM +Architectures: all Type: vcpu ioctl Parameters: struct kvm_regs (out) Returns: 0 on success, -1 on error @@ -314,7 +314,7 @@ struct kvm_regs { 4.12 KVM_SET_REGS Capability: basic -Architectures: all except ARM +Architectures: all Type: vcpu ioctl Parameters: struct kvm_regs (in) Returns: 0 on success, -1 on error @@ -600,7 +600,7 @@ struct kvm_fpu { 4.24 KVM_CREATE_IRQCHIP Capability: KVM_CAP_IRQCHIP -Architectures: x86, ia64, ARM +Architectures: x86, ia64 Type: vm ioctl Parameters: none Returns: 0 on success, -1 on error @@ -608,39 +608,21 @@ Returns: 0 on success, -1 on error Creates an interrupt controller model in the kernel. On x86, creates a virtual ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23 -only go to the IOAPIC. On ia64, a IOSAPIC is created. On ARM, a GIC is -created. +only go to the IOAPIC. On ia64, a IOSAPIC is created. 4.25 KVM_IRQ_LINE Capability: KVM_CAP_IRQCHIP -Architectures: x86, ia64, arm +Architectures: x86, ia64 Type: vm ioctl Parameters: struct kvm_irq_level Returns: 0 on success, -1 on error Sets the level of a GSI input to the interrupt controller model in the kernel. -On some architectures it is required that an interrupt controller model has -been previously created with KVM_CREATE_IRQCHIP. Note that edge-triggered -interrupts require the level to be set to 1 and then back to 0. - -ARM can signal an interrupt either at the CPU level, or at the in-kernel irqchip -(GIC), and for in-kernel irqchip can tell the GIC to use PPIs designated for -specific cpus. The irq field is interpreted like this: - -  bits: | 31 ... 24 | 23 ... 16 | 15 ... 0 | - field: | irq_type | vcpu_index | irq_id | - -The irq_type field has the following values: -- irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ -- irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.) - (the vcpu_index field is ignored) -- irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.) - -(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs) - -In both cases, level is used to raise/lower the line. 
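(The ARM text removed above gives the KVM_IRQ_LINE irq field layout as irq_type
in bits 31..24, vcpu_index in bits 23..16 and irq_id in bits 15..0.  As a purely
illustrative sketch -- the helper name is hypothetical and not part of the KVM
API -- userspace could pack the field like this:

	/* Illustrative only: pack the ARM-specific KVM_IRQ_LINE irq field
	 * from the layout described in the removed text above. */
	static inline __u32 arm_kvm_irq_pack(__u32 irq_type, __u32 vcpu_index,
					     __u32 irq_id)
	{
		return (irq_type << 24) | (vcpu_index << 16) | (irq_id & 0xffff);
	}
)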
+Requires that an interrupt controller model has been previously created with +KVM_CREATE_IRQCHIP. Note that edge-triggered interrupts require the level +to be set to 1 and then back to 0. struct kvm_irq_level { union { @@ -1793,27 +1775,6 @@ registers, find a list below: PPC | KVM_REG_PPC_VPA_DTL | 128 PPC | KVM_REG_PPC_EPCR | 32 -ARM registers are mapped using the lower 32 bits. The upper 16 of that -is the register group type, or coprocessor number: - -ARM core registers have the following id bit patterns: - 0x4002 0000 0010 - -ARM 32-bit CP15 registers have the following id bit patterns: - 0x4002 0000 000F - -ARM 64-bit CP15 registers have the following id bit patterns: - 0x4003 0000 000F - -ARM CCSIDR registers are demultiplexed by CSSELR value: - 0x4002 0000 0011 00 - -ARM 32-bit VFP control registers have the following id bit patterns: - 0x4002 0000 0012 1 - -ARM 64-bit FP registers have the following id bit patterns: - 0x4002 0000 0012 0 - 4.69 KVM_GET_ONE_REG Capability: KVM_CAP_ONE_REG @@ -2166,50 +2127,6 @@ written, then `n_invalid' invalid entries, invalidating any previously valid entries found. -4.77 KVM_ARM_VCPU_INIT - -Capability: basic -Architectures: arm -Type: vcpu ioctl -Parameters: struct struct kvm_vcpu_init (in) -Returns: 0 on success; -1 on error -Errors: -  EINVAL:    the target is unknown, or the combination of features is invalid. -  ENOENT:    a features bit specified is unknown. - -This tells KVM what type of CPU to present to the guest, and what -optional features it should have.  This will cause a reset of the cpu -registers to their initial values.  If this is not called, KVM_RUN will -return ENOEXEC for that vcpu. - -Note that because some registers reflect machine topology, all vcpus -should be created before this ioctl is invoked. - -Possible features: - - KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state. - Depends on KVM_CAP_ARM_PSCI. - - -4.78 KVM_GET_REG_LIST - -Capability: basic -Architectures: arm -Type: vcpu ioctl -Parameters: struct kvm_reg_list (in/out) -Returns: 0 on success; -1 on error -Errors: -  E2BIG:     the reg index list is too big to fit in the array specified by -             the user (the number required will be written into n). - -struct kvm_reg_list { - __u64 n; /* number of registers in reg[] */ - __u64 reg[0]; -}; - -This ioctl returns the guest registers that are supported for the -KVM_GET_ONE_REG/KVM_SET_ONE_REG calls. - - 5. The kvm_run structure ------------------------ diff --git a/trunk/Documentation/x86/boot.txt b/trunk/Documentation/x86/boot.txt index e540fd67f767..3edb4c2887a1 100644 --- a/trunk/Documentation/x86/boot.txt +++ b/trunk/Documentation/x86/boot.txt @@ -57,7 +57,7 @@ Protocol 2.10: (Kernel 2.6.31) Added a protocol for relaxed alignment Protocol 2.11: (Kernel 3.6) Added a field for offset of EFI handover protocol entry point. -Protocol 2.12: (Kernel 3.8) Added the xloadflags field and extension fields +Protocol 2.12: (Kernel 3.9) Added the xloadflags field and extension fields to struct boot_params for for loading bzImage and ramdisk above 4G in 64bit. 
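(Relatedly, the KVM_GET_REG_LIST text removed from api.txt above documents a
two-step pattern: a first call fails with E2BIG and writes the required count
into n, and a second call with a large enough array returns the register
indices.  A minimal userspace sketch, assuming a vcpu fd obtained via
KVM_CREATE_VCPU and with error handling trimmed:

	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* First call: deliberately too small, fails with E2BIG and sets probe.n */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
	list->n = probe.n;

	/* Second call: fills list->reg[0..n-1] with indices usable with
	 * KVM_GET_ONE_REG / KVM_SET_ONE_REG. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
)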
diff --git a/trunk/MAINTAINERS b/trunk/MAINTAINERS index 38633ee8b2d0..212c255b9347 100644 --- a/trunk/MAINTAINERS +++ b/trunk/MAINTAINERS @@ -1489,7 +1489,7 @@ AVR32 ARCHITECTURE M: Haavard Skinnemoen M: Hans-Christian Egtvedt W: http://www.atmel.com/products/AVR32/ -W: http://mirror.egtvedt.no/avr32linux.org/ +W: http://avr32linux.org/ W: http://avrfreaks.net/ S: Maintained F: arch/avr32/ @@ -4216,7 +4216,6 @@ M: Thomas Gleixner S: Maintained T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core F: kernel/irq/ -F: drivers/irqchip/ IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) M: Benjamin Herrenschmidt @@ -4482,15 +4481,6 @@ F: arch/s390/include/asm/kvm* F: arch/s390/kvm/ F: drivers/s390/kvm/ -KERNEL VIRTUAL MACHINE (KVM) FOR ARM -M: Christoffer Dall -L: kvmarm@lists.cs.columbia.edu -W: http://systems.cs.columbia.edu/projects/kvm-arm -S: Maintained -F: arch/arm/include/uapi/asm/kvm* -F: arch/arm/include/asm/kvm* -F: arch/arm/kvm/ - KEXEC M: Eric Biederman W: http://kernel.org/pub/linux/utils/kernel/kexec/ diff --git a/trunk/Makefile b/trunk/Makefile index 08ef9bdb80c7..2d3c92c774fb 100644 --- a/trunk/Makefile +++ b/trunk/Makefile @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 8 SUBLEVEL = 0 -EXTRAVERSION = -rc7 -NAME = Unicycling Gorilla +EXTRAVERSION = -rc5 +NAME = Terrified Chipmunk # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/trunk/arch/arm/Kconfig b/trunk/arch/arm/Kconfig index 7a106612cb0c..c20441394504 100644 --- a/trunk/arch/arm/Kconfig +++ b/trunk/arch/arm/Kconfig @@ -2334,5 +2334,3 @@ source "security/Kconfig" source "crypto/Kconfig" source "lib/Kconfig" - -source "arch/arm/kvm/Kconfig" diff --git a/trunk/arch/arm/Makefile b/trunk/arch/arm/Makefile index 4bcd2d6b0535..30c443c406f3 100644 --- a/trunk/arch/arm/Makefile +++ b/trunk/arch/arm/Makefile @@ -252,7 +252,6 @@ core-$(CONFIG_FPE_NWFPE) += arch/arm/nwfpe/ core-$(CONFIG_FPE_FASTFPE) += $(FASTFPE_OBJ) core-$(CONFIG_VFP) += arch/arm/vfp/ core-$(CONFIG_XEN) += arch/arm/xen/ -core-$(CONFIG_KVM_ARM_HOST) += arch/arm/kvm/ # If we have a machine-specific directory, then include it in the build. core-y += arch/arm/kernel/ arch/arm/mm/ arch/arm/common/ diff --git a/trunk/arch/arm/common/Kconfig b/trunk/arch/arm/common/Kconfig index 9353184d730d..45ceeb0e93e0 100644 --- a/trunk/arch/arm/common/Kconfig +++ b/trunk/arch/arm/common/Kconfig @@ -1,3 +1,26 @@ +config ARM_GIC + bool + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER + +config GIC_NON_BANKED + bool + +config ARM_VIC + bool + select IRQ_DOMAIN + select MULTI_IRQ_HANDLER + +config ARM_VIC_NR + int + default 4 if ARCH_S5PV210 + default 3 if ARCH_S5PC100 + default 2 + depends on ARM_VIC + help + The maximum number of VICs available in the system, for + power management. + config ICST bool diff --git a/trunk/arch/arm/common/Makefile b/trunk/arch/arm/common/Makefile index dc8dd0de5c0f..e8a4e58f1b82 100644 --- a/trunk/arch/arm/common/Makefile +++ b/trunk/arch/arm/common/Makefile @@ -2,6 +2,8 @@ # Makefile for the linux kernel. 
# +obj-$(CONFIG_ARM_GIC) += gic.o +obj-$(CONFIG_ARM_VIC) += vic.o obj-$(CONFIG_ICST) += icst.o obj-$(CONFIG_SA1111) += sa1111.o obj-$(CONFIG_PCI_HOST_VIA82C505) += via82c505.o diff --git a/trunk/drivers/irqchip/irq-gic.c b/trunk/arch/arm/common/gic.c similarity index 95% rename from trunk/drivers/irqchip/irq-gic.c rename to trunk/arch/arm/common/gic.c index 644d72468423..36ae03a3f5d1 100644 --- a/trunk/drivers/irqchip/irq-gic.c +++ b/trunk/arch/arm/common/gic.c @@ -38,14 +38,12 @@ #include #include #include -#include #include #include #include #include - -#include "irqchip.h" +#include union gic_base { void __iomem *common_base; @@ -278,7 +276,7 @@ static int gic_set_wake(struct irq_data *d, unsigned int on) #define gic_set_wake NULL #endif -static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) +asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) { u32 irqstat, irqnr; struct gic_chip_data *gic = &gic_data[0]; @@ -353,25 +351,6 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) irq_set_chained_handler(irq, gic_handle_cascade_irq); } -static u8 gic_get_cpumask(struct gic_chip_data *gic) -{ - void __iomem *base = gic_data_dist_base(gic); - u32 mask, i; - - for (i = mask = 0; i < 32; i += 4) { - mask = readl_relaxed(base + GIC_DIST_TARGET + i); - mask |= mask >> 16; - mask |= mask >> 8; - if (mask) - break; - } - - if (!mask) - pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); - - return mask; -} - static void __init gic_dist_init(struct gic_chip_data *gic) { unsigned int i; @@ -390,9 +369,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic) /* * Set all global interrupts to this CPU only. */ - cpumask = gic_get_cpumask(gic); - cpumask |= cpumask << 8; - cpumask |= cpumask << 16; + cpumask = readl_relaxed(base + GIC_DIST_TARGET + 0); for (i = 32; i < gic_irqs; i += 4) writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); @@ -423,7 +400,7 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic) * Get what the GIC says our CPU mask is. */ BUG_ON(cpu >= NR_GIC_CPU_IF); - cpu_mask = gic_get_cpumask(gic); + cpu_mask = readl_relaxed(dist_base + GIC_DIST_TARGET + 0); gic_cpu_map[cpu] = cpu_mask; /* @@ -640,27 +617,6 @@ static void __init gic_pm_init(struct gic_chip_data *gic) } #endif -#ifdef CONFIG_SMP -void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) -{ - int cpu; - unsigned long map = 0; - - /* Convert our logical CPU mask into a physical one. */ - for_each_cpu(cpu, mask) - map |= 1 << cpu_logical_map(cpu); - - /* - * Ensure that stores to Normal memory are visible to the - * other CPUs before issuing the IPI. - */ - dsb(); - - /* this always happens on GIC0 */ - writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); -} -#endif - static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { @@ -787,12 +743,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, if (WARN_ON(!gic->domain)) return; -#ifdef CONFIG_SMP - set_smp_cross_call(gic_raise_softirq); -#endif - - set_handle_irq(gic_handle_irq); - gic_chip.flags |= gic_arch_extn.flags; gic_dist_init(gic); gic_cpu_init(gic); @@ -806,6 +756,27 @@ void __cpuinit gic_secondary_init(unsigned int gic_nr) gic_cpu_init(&gic_data[gic_nr]); } +#ifdef CONFIG_SMP +void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) +{ + int cpu; + unsigned long map = 0; + + /* Convert our logical CPU mask into a physical one. 
*/ + for_each_cpu(cpu, mask) + map |= gic_cpu_map[cpu]; + + /* + * Ensure that stores to Normal memory are visible to the + * other CPUs before issuing the IPI. + */ + dsb(); + + /* this always happens on GIC0 */ + writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); +} +#endif + #ifdef CONFIG_OF static int gic_cnt __initdata = 0; @@ -837,9 +808,4 @@ int __init gic_of_init(struct device_node *node, struct device_node *parent) gic_cnt++; return 0; } -IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); -IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); -IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); -IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); - #endif diff --git a/trunk/drivers/irqchip/irq-vic.c b/trunk/arch/arm/common/vic.c similarity index 92% rename from trunk/drivers/irqchip/irq-vic.c rename to trunk/arch/arm/common/vic.c index 3cf97aaebe40..8f324b99416e 100644 --- a/trunk/drivers/irqchip/irq-vic.c +++ b/trunk/arch/arm/common/vic.c @@ -30,29 +30,10 @@ #include #include #include -#include #include #include - -#include "irqchip.h" - -#define VIC_IRQ_STATUS 0x00 -#define VIC_FIQ_STATUS 0x04 -#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */ -#define VIC_INT_SOFT 0x18 -#define VIC_INT_SOFT_CLEAR 0x1c -#define VIC_PROTECT 0x20 -#define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */ -#define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */ - -#define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */ -#define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */ -#define VIC_ITCR 0x300 /* VIC test control register */ - -#define VIC_VECT_CNTL_ENABLE (1 << 5) - -#define VIC_PL192_VECT_ADDR 0xF00 +#include /** * struct vic_device - VIC PM device @@ -85,8 +66,6 @@ static struct vic_device vic_devices[CONFIG_ARM_VIC_NR]; static int vic_id; -static void vic_handle_irq(struct pt_regs *regs); - /** * vic_init2 - common initialisation code * @base: Base of the VIC. @@ -203,40 +182,6 @@ static int vic_irqdomain_map(struct irq_domain *d, unsigned int irq, return 0; } -/* - * Handle each interrupt in a single VIC. Returns non-zero if we've - * handled at least one interrupt. This reads the status register - * before handling each interrupt, which is necessary given that - * handle_IRQ may briefly re-enable interrupts for soft IRQ handling. - */ -static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) -{ - u32 stat, irq; - int handled = 0; - - while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { - irq = ffs(stat) - 1; - handle_IRQ(irq_find_mapping(vic->domain, irq), regs); - handled = 1; - } - - return handled; -} - -/* - * Keep iterating over all registered VIC's until there are no pending - * interrupts. 
- */ -static asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs) -{ - int i, handled; - - do { - for (i = 0, handled = 0; i < vic_id; ++i) - handled |= handle_one_vic(&vic_devices[i], regs); - } while (handled); -} - static struct irq_domain_ops vic_irqdomain_ops = { .map = vic_irqdomain_map, .xlate = irq_domain_xlate_onetwocell, @@ -273,7 +218,6 @@ static void __init vic_register(void __iomem *base, unsigned int irq, v->valid_sources = valid_sources; v->resume_sources = resume_sources; v->irq = irq; - set_handle_irq(vic_handle_irq); vic_id++; v->domain = irq_domain_add_simple(node, fls(valid_sources), irq, &vic_irqdomain_ops, v); @@ -483,7 +427,38 @@ int __init vic_of_init(struct device_node *node, struct device_node *parent) return 0; } -IRQCHIP_DECLARE(arm_pl190_vic, "arm,pl190-vic", vic_of_init); -IRQCHIP_DECLARE(arm_pl192_vic, "arm,pl192-vic", vic_of_init); -IRQCHIP_DECLARE(arm_versatile_vic, "arm,versatile-vic", vic_of_init); #endif /* CONFIG OF */ + +/* + * Handle each interrupt in a single VIC. Returns non-zero if we've + * handled at least one interrupt. This reads the status register + * before handling each interrupt, which is necessary given that + * handle_IRQ may briefly re-enable interrupts for soft IRQ handling. + */ +static int handle_one_vic(struct vic_device *vic, struct pt_regs *regs) +{ + u32 stat, irq; + int handled = 0; + + while ((stat = readl_relaxed(vic->base + VIC_IRQ_STATUS))) { + irq = ffs(stat) - 1; + handle_IRQ(irq_find_mapping(vic->domain, irq), regs); + handled = 1; + } + + return handled; +} + +/* + * Keep iterating over all registered VIC's until there are no pending + * interrupts. + */ +asmlinkage void __exception_irq_entry vic_handle_irq(struct pt_regs *regs) +{ + int i, handled; + + do { + for (i = 0, handled = 0; i < vic_id; ++i) + handled |= handle_one_vic(&vic_devices[i], regs); + } while (handled); +} diff --git a/trunk/arch/arm/include/asm/cputype.h b/trunk/arch/arm/include/asm/cputype.h index ad41ec2471e8..a59dcb5ab5fc 100644 --- a/trunk/arch/arm/include/asm/cputype.h +++ b/trunk/arch/arm/include/asm/cputype.h @@ -64,24 +64,6 @@ extern unsigned int processor_id; #define read_cpuid_ext(reg) 0 #endif -#define ARM_CPU_IMP_ARM 0x41 -#define ARM_CPU_IMP_INTEL 0x69 - -#define ARM_CPU_PART_ARM1136 0xB360 -#define ARM_CPU_PART_ARM1156 0xB560 -#define ARM_CPU_PART_ARM1176 0xB760 -#define ARM_CPU_PART_ARM11MPCORE 0xB020 -#define ARM_CPU_PART_CORTEX_A8 0xC080 -#define ARM_CPU_PART_CORTEX_A9 0xC090 -#define ARM_CPU_PART_CORTEX_A5 0xC050 -#define ARM_CPU_PART_CORTEX_A15 0xC0F0 -#define ARM_CPU_PART_CORTEX_A7 0xC070 - -#define ARM_CPU_XSCALE_ARCH_MASK 0xe000 -#define ARM_CPU_XSCALE_ARCH_V1 0x2000 -#define ARM_CPU_XSCALE_ARCH_V2 0x4000 -#define ARM_CPU_XSCALE_ARCH_V3 0x6000 - /* * The CPU ID never changes at run time, so we might as well tell the * compiler that it's constant. 
Use this function to read the CPU ID @@ -92,21 +74,6 @@ static inline unsigned int __attribute_const__ read_cpuid_id(void) return read_cpuid(CPUID_ID); } -static inline unsigned int __attribute_const__ read_cpuid_implementor(void) -{ - return (read_cpuid_id() & 0xFF000000) >> 24; -} - -static inline unsigned int __attribute_const__ read_cpuid_part_number(void) -{ - return read_cpuid_id() & 0xFFF0; -} - -static inline unsigned int __attribute_const__ xscale_cpu_arch_version(void) -{ - return read_cpuid_part_number() & ARM_CPU_XSCALE_ARCH_MASK; -} - static inline unsigned int __attribute_const__ read_cpuid_cachetype(void) { return read_cpuid(CPUID_CACHETYPE); diff --git a/trunk/include/linux/irqchip/arm-gic.h b/trunk/arch/arm/include/asm/hardware/gic.h similarity index 77% rename from trunk/include/linux/irqchip/arm-gic.h rename to trunk/arch/arm/include/asm/hardware/gic.h index a67ca55e6f4e..4b1ce6cd477f 100644 --- a/trunk/include/linux/irqchip/arm-gic.h +++ b/trunk/arch/arm/include/asm/hardware/gic.h @@ -1,5 +1,5 @@ /* - * include/linux/irqchip/arm-gic.h + * arch/arm/include/asm/hardware/gic.h * * Copyright (C) 2002 ARM Limited, All Rights Reserved. * @@ -7,8 +7,10 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ -#ifndef __LINUX_IRQCHIP_ARM_GIC_H -#define __LINUX_IRQCHIP_ARM_GIC_H +#ifndef __ASM_ARM_HARDWARE_GIC_H +#define __ASM_ARM_HARDWARE_GIC_H + +#include #define GIC_CPU_CTRL 0x00 #define GIC_CPU_PRIMASK 0x04 @@ -30,14 +32,19 @@ #define GIC_DIST_CONFIG 0xc00 #define GIC_DIST_SOFTINT 0xf00 +#ifndef __ASSEMBLY__ +#include struct device_node; extern struct irq_chip gic_arch_extn; void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *, u32 offset, struct device_node *); +int gic_of_init(struct device_node *node, struct device_node *parent); void gic_secondary_init(unsigned int); +void gic_handle_irq(struct pt_regs *regs); void gic_cascade_irq(unsigned int gic_nr, unsigned int irq); +void gic_raise_softirq(const struct cpumask *mask, unsigned int irq); static inline void gic_init(unsigned int nr, int start, void __iomem *dist , void __iomem *cpu) @@ -46,3 +53,5 @@ static inline void gic_init(unsigned int nr, int start, } #endif + +#endif diff --git a/trunk/include/linux/irqchip/arm-vic.h b/trunk/arch/arm/include/asm/hardware/vic.h similarity index 63% rename from trunk/include/linux/irqchip/arm-vic.h rename to trunk/arch/arm/include/asm/hardware/vic.h index e3c82dc95756..2bebad36fc83 100644 --- a/trunk/include/linux/irqchip/arm-vic.h +++ b/trunk/arch/arm/include/asm/hardware/vic.h @@ -20,11 +20,29 @@ #ifndef __ASM_ARM_HARDWARE_VIC_H #define __ASM_ARM_HARDWARE_VIC_H -#include - +#define VIC_IRQ_STATUS 0x00 +#define VIC_FIQ_STATUS 0x04 #define VIC_RAW_STATUS 0x08 +#define VIC_INT_SELECT 0x0c /* 1 = FIQ, 0 = IRQ */ #define VIC_INT_ENABLE 0x10 /* 1 = enable, 0 = disable */ #define VIC_INT_ENABLE_CLEAR 0x14 +#define VIC_INT_SOFT 0x18 +#define VIC_INT_SOFT_CLEAR 0x1c +#define VIC_PROTECT 0x20 +#define VIC_PL190_VECT_ADDR 0x30 /* PL190 only */ +#define VIC_PL190_DEF_VECT_ADDR 0x34 /* PL190 only */ + +#define VIC_VECT_ADDR0 0x100 /* 0 to 15 (0..31 PL192) */ +#define VIC_VECT_CNTL0 0x200 /* 0 to 15 (0..31 PL192) */ +#define VIC_ITCR 0x300 /* VIC test control register */ + +#define VIC_VECT_CNTL_ENABLE (1 << 5) + +#define VIC_PL192_VECT_ADDR 0xF00 + +#ifndef __ASSEMBLY__ +#include +#include struct device_node; struct pt_regs; @@ -32,5 +50,8 @@ struct pt_regs; void __vic_init(void __iomem *base, int irq_start, u32 
vic_sources, u32 resume_sources, struct device_node *node); void vic_init(void __iomem *base, unsigned int irq_start, u32 vic_sources, u32 resume_sources); +int vic_of_init(struct device_node *node, struct device_node *parent); +void vic_handle_irq(struct pt_regs *regs); +#endif /* __ASSEMBLY__ */ #endif diff --git a/trunk/arch/arm/include/asm/idmap.h b/trunk/arch/arm/include/asm/idmap.h index 1a66f907e5cc..bf863edb517d 100644 --- a/trunk/arch/arm/include/asm/idmap.h +++ b/trunk/arch/arm/include/asm/idmap.h @@ -8,7 +8,6 @@ #define __idmap __section(.idmap.text) noinline notrace extern pgd_t *idmap_pgd; -extern pgd_t *hyp_pgd; void setup_mm_for_reboot(void); diff --git a/trunk/arch/arm/include/asm/kvm_arm.h b/trunk/arch/arm/include/asm/kvm_arm.h deleted file mode 100644 index 7c3d813e15df..000000000000 --- a/trunk/arch/arm/include/asm/kvm_arm.h +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_ARM_H__ -#define __ARM_KVM_ARM_H__ - -#include - -/* Hyp Configuration Register (HCR) bits */ -#define HCR_TGE (1 << 27) -#define HCR_TVM (1 << 26) -#define HCR_TTLB (1 << 25) -#define HCR_TPU (1 << 24) -#define HCR_TPC (1 << 23) -#define HCR_TSW (1 << 22) -#define HCR_TAC (1 << 21) -#define HCR_TIDCP (1 << 20) -#define HCR_TSC (1 << 19) -#define HCR_TID3 (1 << 18) -#define HCR_TID2 (1 << 17) -#define HCR_TID1 (1 << 16) -#define HCR_TID0 (1 << 15) -#define HCR_TWE (1 << 14) -#define HCR_TWI (1 << 13) -#define HCR_DC (1 << 12) -#define HCR_BSU (3 << 10) -#define HCR_BSU_IS (1 << 10) -#define HCR_FB (1 << 9) -#define HCR_VA (1 << 8) -#define HCR_VI (1 << 7) -#define HCR_VF (1 << 6) -#define HCR_AMO (1 << 5) -#define HCR_IMO (1 << 4) -#define HCR_FMO (1 << 3) -#define HCR_PTW (1 << 2) -#define HCR_SWIO (1 << 1) -#define HCR_VM 1 - -/* - * The bits we set in HCR: - * TAC: Trap ACTLR - * TSC: Trap SMC - * TSW: Trap cache operations by set/way - * TWI: Trap WFI - * TIDCP: Trap L2CTLR/L2ECTLR - * BSU_IS: Upgrade barriers to the inner shareable domain - * FB: Force broadcast of all maintainance operations - * AMO: Override CPSR.A and enable signaling with VA - * IMO: Override CPSR.I and enable signaling with VI - * FMO: Override CPSR.F and enable signaling with VF - * SWIO: Turn set/way invalidates into set/way clean+invalidate - */ -#define HCR_GUEST_MASK (HCR_TSC | HCR_TSW | HCR_TWI | HCR_VM | HCR_BSU_IS | \ - HCR_FB | HCR_TAC | HCR_AMO | HCR_IMO | HCR_FMO | \ - HCR_SWIO | HCR_TIDCP) -#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF) - -/* System Control Register (SCTLR) bits */ -#define SCTLR_TE (1 << 30) -#define SCTLR_EE (1 << 25) -#define SCTLR_V (1 << 13) - -/* Hyp System Control Register (HSCTLR) bits */ -#define HSCTLR_TE (1 << 30) -#define HSCTLR_EE (1 << 25) -#define HSCTLR_FI (1 << 21) -#define HSCTLR_WXN (1 << 19) -#define HSCTLR_I 
(1 << 12) -#define HSCTLR_C (1 << 2) -#define HSCTLR_A (1 << 1) -#define HSCTLR_M 1 -#define HSCTLR_MASK (HSCTLR_M | HSCTLR_A | HSCTLR_C | HSCTLR_I | \ - HSCTLR_WXN | HSCTLR_FI | HSCTLR_EE | HSCTLR_TE) - -/* TTBCR and HTCR Registers bits */ -#define TTBCR_EAE (1 << 31) -#define TTBCR_IMP (1 << 30) -#define TTBCR_SH1 (3 << 28) -#define TTBCR_ORGN1 (3 << 26) -#define TTBCR_IRGN1 (3 << 24) -#define TTBCR_EPD1 (1 << 23) -#define TTBCR_A1 (1 << 22) -#define TTBCR_T1SZ (3 << 16) -#define TTBCR_SH0 (3 << 12) -#define TTBCR_ORGN0 (3 << 10) -#define TTBCR_IRGN0 (3 << 8) -#define TTBCR_EPD0 (1 << 7) -#define TTBCR_T0SZ 3 -#define HTCR_MASK (TTBCR_T0SZ | TTBCR_IRGN0 | TTBCR_ORGN0 | TTBCR_SH0) - -/* Hyp System Trap Register */ -#define HSTR_T(x) (1 << x) -#define HSTR_TTEE (1 << 16) -#define HSTR_TJDBX (1 << 17) - -/* Hyp Coprocessor Trap Register */ -#define HCPTR_TCP(x) (1 << x) -#define HCPTR_TCP_MASK (0x3fff) -#define HCPTR_TASE (1 << 15) -#define HCPTR_TTA (1 << 20) -#define HCPTR_TCPAC (1 << 31) - -/* Hyp Debug Configuration Register bits */ -#define HDCR_TDRA (1 << 11) -#define HDCR_TDOSA (1 << 10) -#define HDCR_TDA (1 << 9) -#define HDCR_TDE (1 << 8) -#define HDCR_HPME (1 << 7) -#define HDCR_TPM (1 << 6) -#define HDCR_TPMCR (1 << 5) -#define HDCR_HPMN_MASK (0x1F) - -/* - * The architecture supports 40-bit IPA as input to the 2nd stage translations - * and PTRS_PER_S2_PGD becomes 1024, because each entry covers 1GB of address - * space. - */ -#define KVM_PHYS_SHIFT (40) -#define KVM_PHYS_SIZE (1ULL << KVM_PHYS_SHIFT) -#define KVM_PHYS_MASK (KVM_PHYS_SIZE - 1ULL) -#define PTRS_PER_S2_PGD (1ULL << (KVM_PHYS_SHIFT - 30)) -#define S2_PGD_ORDER get_order(PTRS_PER_S2_PGD * sizeof(pgd_t)) -#define S2_PGD_SIZE (1 << S2_PGD_ORDER) - -/* Virtualization Translation Control Register (VTCR) bits */ -#define VTCR_SH0 (3 << 12) -#define VTCR_ORGN0 (3 << 10) -#define VTCR_IRGN0 (3 << 8) -#define VTCR_SL0 (3 << 6) -#define VTCR_S (1 << 4) -#define VTCR_T0SZ (0xf) -#define VTCR_MASK (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0 | VTCR_SL0 | \ - VTCR_S | VTCR_T0SZ) -#define VTCR_HTCR_SH (VTCR_SH0 | VTCR_ORGN0 | VTCR_IRGN0) -#define VTCR_SL_L2 (0 << 6) /* Starting-level: 2 */ -#define VTCR_SL_L1 (1 << 6) /* Starting-level: 1 */ -#define KVM_VTCR_SL0 VTCR_SL_L1 -/* stage-2 input address range defined as 2^(32-T0SZ) */ -#define KVM_T0SZ (32 - KVM_PHYS_SHIFT) -#define KVM_VTCR_T0SZ (KVM_T0SZ & VTCR_T0SZ) -#define KVM_VTCR_S ((KVM_VTCR_T0SZ << 1) & VTCR_S) - -/* Virtualization Translation Table Base Register (VTTBR) bits */ -#if KVM_VTCR_SL0 == VTCR_SL_L2 /* see ARM DDI 0406C: B4-1720 */ -#define VTTBR_X (14 - KVM_T0SZ) -#else -#define VTTBR_X (5 - KVM_T0SZ) -#endif -#define VTTBR_BADDR_SHIFT (VTTBR_X - 1) -#define VTTBR_BADDR_MASK (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT) -#define VTTBR_VMID_SHIFT (48LLU) -#define VTTBR_VMID_MASK (0xffLLU << VTTBR_VMID_SHIFT) - -/* Hyp Syndrome Register (HSR) bits */ -#define HSR_EC_SHIFT (26) -#define HSR_EC (0x3fU << HSR_EC_SHIFT) -#define HSR_IL (1U << 25) -#define HSR_ISS (HSR_IL - 1) -#define HSR_ISV_SHIFT (24) -#define HSR_ISV (1U << HSR_ISV_SHIFT) -#define HSR_SRT_SHIFT (16) -#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT) -#define HSR_FSC (0x3f) -#define HSR_FSC_TYPE (0x3c) -#define HSR_SSE (1 << 21) -#define HSR_WNR (1 << 6) -#define HSR_CV_SHIFT (24) -#define HSR_CV (1U << HSR_CV_SHIFT) -#define HSR_COND_SHIFT (20) -#define HSR_COND (0xfU << HSR_COND_SHIFT) - -#define FSC_FAULT (0x04) -#define FSC_PERM (0x0c) - -/* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */ -#define 
HPFAR_MASK (~0xf) - -#define HSR_EC_UNKNOWN (0x00) -#define HSR_EC_WFI (0x01) -#define HSR_EC_CP15_32 (0x03) -#define HSR_EC_CP15_64 (0x04) -#define HSR_EC_CP14_MR (0x05) -#define HSR_EC_CP14_LS (0x06) -#define HSR_EC_CP_0_13 (0x07) -#define HSR_EC_CP10_ID (0x08) -#define HSR_EC_JAZELLE (0x09) -#define HSR_EC_BXJ (0x0A) -#define HSR_EC_CP14_64 (0x0C) -#define HSR_EC_SVC_HYP (0x11) -#define HSR_EC_HVC (0x12) -#define HSR_EC_SMC (0x13) -#define HSR_EC_IABT (0x20) -#define HSR_EC_IABT_HYP (0x21) -#define HSR_EC_DABT (0x24) -#define HSR_EC_DABT_HYP (0x25) - -#define HSR_HVC_IMM_MASK ((1UL << 16) - 1) - -#endif /* __ARM_KVM_ARM_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_asm.h b/trunk/arch/arm/include/asm/kvm_asm.h deleted file mode 100644 index 5e06e8177784..000000000000 --- a/trunk/arch/arm/include/asm/kvm_asm.h +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_ASM_H__ -#define __ARM_KVM_ASM_H__ - -/* 0 is reserved as an invalid value. */ -#define c0_MPIDR 1 /* MultiProcessor ID Register */ -#define c0_CSSELR 2 /* Cache Size Selection Register */ -#define c1_SCTLR 3 /* System Control Register */ -#define c1_ACTLR 4 /* Auxilliary Control Register */ -#define c1_CPACR 5 /* Coprocessor Access Control */ -#define c2_TTBR0 6 /* Translation Table Base Register 0 */ -#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */ -#define c2_TTBR1 8 /* Translation Table Base Register 1 */ -#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */ -#define c2_TTBCR 10 /* Translation Table Base Control R. */ -#define c3_DACR 11 /* Domain Access Control Register */ -#define c5_DFSR 12 /* Data Fault Status Register */ -#define c5_IFSR 13 /* Instruction Fault Status Register */ -#define c5_ADFSR 14 /* Auxilary Data Fault Status R */ -#define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */ -#define c6_DFAR 16 /* Data Fault Address Register */ -#define c6_IFAR 17 /* Instruction Fault Address Register */ -#define c9_L2CTLR 18 /* Cortex A15 L2 Control Register */ -#define c10_PRRR 19 /* Primary Region Remap Register */ -#define c10_NMRR 20 /* Normal Memory Remap Register */ -#define c12_VBAR 21 /* Vector Base Address Register */ -#define c13_CID 22 /* Context ID Register */ -#define c13_TID_URW 23 /* Thread ID, User R/W */ -#define c13_TID_URO 24 /* Thread ID, User R/O */ -#define c13_TID_PRIV 25 /* Thread ID, Privileged */ -#define NR_CP15_REGS 26 /* Number of regs (incl. 
invalid) */ - -#define ARM_EXCEPTION_RESET 0 -#define ARM_EXCEPTION_UNDEFINED 1 -#define ARM_EXCEPTION_SOFTWARE 2 -#define ARM_EXCEPTION_PREF_ABORT 3 -#define ARM_EXCEPTION_DATA_ABORT 4 -#define ARM_EXCEPTION_IRQ 5 -#define ARM_EXCEPTION_FIQ 6 -#define ARM_EXCEPTION_HVC 7 - -#ifndef __ASSEMBLY__ -struct kvm; -struct kvm_vcpu; - -extern char __kvm_hyp_init[]; -extern char __kvm_hyp_init_end[]; - -extern char __kvm_hyp_exit[]; -extern char __kvm_hyp_exit_end[]; - -extern char __kvm_hyp_vector[]; - -extern char __kvm_hyp_code_start[]; -extern char __kvm_hyp_code_end[]; - -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); - -extern void __kvm_flush_vm_context(void); -extern void __kvm_tlb_flush_vmid(struct kvm *kvm); - -extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); -#endif - -#endif /* __ARM_KVM_ASM_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_coproc.h b/trunk/arch/arm/include/asm/kvm_coproc.h deleted file mode 100644 index 4917c2f7e459..000000000000 --- a/trunk/arch/arm/include/asm/kvm_coproc.h +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright (C) 2012 Rusty Russell IBM Corporation - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_COPROC_H__ -#define __ARM_KVM_COPROC_H__ -#include - -void kvm_reset_coprocs(struct kvm_vcpu *vcpu); - -struct kvm_coproc_target_table { - unsigned target; - const struct coproc_reg *table; - size_t num; -}; -void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table); - -int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run); -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run); - -unsigned long kvm_arm_num_guest_msrs(struct kvm_vcpu *vcpu); -int kvm_arm_copy_msrindices(struct kvm_vcpu *vcpu, u64 __user *uindices); -void kvm_coproc_table_init(void); - -struct kvm_one_reg; -int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices); -int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); -int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *); -unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu); -#endif /* __ARM_KVM_COPROC_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_emulate.h b/trunk/arch/arm/include/asm/kvm_emulate.h deleted file mode 100644 index fd611996bfb5..000000000000 --- a/trunk/arch/arm/include/asm/kvm_emulate.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by 
the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_EMULATE_H__ -#define __ARM_KVM_EMULATE_H__ - -#include -#include -#include - -u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num); -u32 *vcpu_spsr(struct kvm_vcpu *vcpu); - -int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run); -void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr); -void kvm_inject_undefined(struct kvm_vcpu *vcpu); -void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); -void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); - -static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu) -{ - return 1; -} - -static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu) -{ - return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc; -} - -static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu) -{ - return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr; -} - -static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) -{ - *vcpu_cpsr(vcpu) |= PSR_T_BIT; -} - -static inline bool mode_has_spsr(struct kvm_vcpu *vcpu) -{ - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; - return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE); -} - -static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu) -{ - unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK; - return cpsr_mode > USR_MODE;; -} - -static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg) -{ - return reg == 15; -} - -#endif /* __ARM_KVM_EMULATE_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_host.h b/trunk/arch/arm/include/asm/kvm_host.h deleted file mode 100644 index 98b4d1a72923..000000000000 --- a/trunk/arch/arm/include/asm/kvm_host.h +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_HOST_H__ -#define __ARM_KVM_HOST_H__ - -#include -#include -#include -#include - -#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS -#define KVM_MEMORY_SLOTS 32 -#define KVM_PRIVATE_MEM_SLOTS 4 -#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 -#define KVM_HAVE_ONE_REG - -#define KVM_VCPU_MAX_FEATURES 1 - -/* We don't currently support large pages. 
*/ -#define KVM_HPAGE_GFN_SHIFT(x) 0 -#define KVM_NR_PAGE_SIZES 1 -#define KVM_PAGES_PER_HPAGE(x) (1UL<<31) - -struct kvm_vcpu; -u32 *kvm_vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num, u32 mode); -int kvm_target_cpu(void); -int kvm_reset_vcpu(struct kvm_vcpu *vcpu); -void kvm_reset_coprocs(struct kvm_vcpu *vcpu); - -struct kvm_arch { - /* VTTBR value associated with below pgd and vmid */ - u64 vttbr; - - /* - * Anything that is not used directly from assembly code goes - * here. - */ - - /* The VMID generation used for the virt. memory system */ - u64 vmid_gen; - u32 vmid; - - /* Stage-2 page table */ - pgd_t *pgd; -}; - -#define KVM_NR_MEM_OBJS 40 - -/* - * We don't want allocation failures within the mmu code, so we preallocate - * enough memory for a single page fault in a cache. - */ -struct kvm_mmu_memory_cache { - int nobjs; - void *objects[KVM_NR_MEM_OBJS]; -}; - -struct kvm_vcpu_arch { - struct kvm_regs regs; - - int target; /* Processor target */ - DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES); - - /* System control coprocessor (cp15) */ - u32 cp15[NR_CP15_REGS]; - - /* The CPU type we expose to the VM */ - u32 midr; - - /* Exception Information */ - u32 hsr; /* Hyp Syndrome Register */ - u32 hxfar; /* Hyp Data/Inst Fault Address Register */ - u32 hpfar; /* Hyp IPA Fault Address Register */ - - /* Floating point registers (VFP and Advanced SIMD/NEON) */ - struct vfp_hard_struct vfp_guest; - struct vfp_hard_struct *vfp_host; - - /* - * Anything that is not used directly from assembly code goes - * here. - */ - /* dcache set/way operation pending */ - int last_pcpu; - cpumask_t require_dcache_flush; - - /* Don't run the guest on this vcpu */ - bool pause; - - /* IO related fields */ - struct kvm_decode mmio_decode; - - /* Interrupt related fields */ - u32 irq_lines; /* IRQ and FIQ levels */ - - /* Hyp exception information */ - u32 hyp_pc; /* PC when exception was taken from Hyp mode */ - - /* Cache some mmu pages needed inside spinlock regions */ - struct kvm_mmu_memory_cache mmu_page_cache; - - /* Detect first run of a vcpu */ - bool has_run_once; -}; - -struct kvm_vm_stat { - u32 remote_tlb_flush; -}; - -struct kvm_vcpu_stat { - u32 halt_wakeup; -}; - -struct kvm_vcpu_init; -int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, - const struct kvm_vcpu_init *init); -unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); -int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); -struct kvm_one_reg; -int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); -int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); -u64 kvm_call_hyp(void *hypfn, ...); -void force_vm_exit(const cpumask_t *mask); - -#define KVM_ARCH_WANT_MMU_NOTIFIER -struct kvm; -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva); -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end); -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte); - -unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu); -int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices); - -/* We do not have shadow page tables, hence the empty hooks */ -static inline int kvm_age_hva(struct kvm *kvm, unsigned long hva) -{ - return 0; -} - -static inline int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) -{ - return 0; -} -#endif /* __ARM_KVM_HOST_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_mmio.h b/trunk/arch/arm/include/asm/kvm_mmio.h deleted file mode 100644 index adcc0d7d3175..000000000000 --- 
a/trunk/arch/arm/include/asm/kvm_mmio.h +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_MMIO_H__ -#define __ARM_KVM_MMIO_H__ - -#include -#include -#include - -struct kvm_decode { - unsigned long rt; - bool sign_extend; -}; - -/* - * The in-kernel MMIO emulation code wants to use a copy of run->mmio, - * which is an anonymous type. Use our own type instead. - */ -struct kvm_exit_mmio { - phys_addr_t phys_addr; - u8 data[8]; - u32 len; - bool is_write; -}; - -static inline void kvm_prepare_mmio(struct kvm_run *run, - struct kvm_exit_mmio *mmio) -{ - run->mmio.phys_addr = mmio->phys_addr; - run->mmio.len = mmio->len; - run->mmio.is_write = mmio->is_write; - memcpy(run->mmio.data, mmio->data, mmio->len); - run->exit_reason = KVM_EXIT_MMIO; -} - -int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); -int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, - phys_addr_t fault_ipa); - -#endif /* __ARM_KVM_MMIO_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_mmu.h b/trunk/arch/arm/include/asm/kvm_mmu.h deleted file mode 100644 index 421a20b34874..000000000000 --- a/trunk/arch/arm/include/asm/kvm_mmu.h +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#ifndef __ARM_KVM_MMU_H__ -#define __ARM_KVM_MMU_H__ - -int create_hyp_mappings(void *from, void *to); -int create_hyp_io_mappings(void *from, void *to, phys_addr_t); -void free_hyp_pmds(void); - -int kvm_alloc_stage2_pgd(struct kvm *kvm); -void kvm_free_stage2_pgd(struct kvm *kvm); -int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, - phys_addr_t pa, unsigned long size); - -int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run); - -void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu); - -phys_addr_t kvm_mmu_get_httbr(void); -int kvm_mmu_init(void); -void kvm_clear_hyp_idmap(void); - -static inline bool kvm_is_write_fault(unsigned long hsr) -{ - unsigned long hsr_ec = hsr >> HSR_EC_SHIFT; - if (hsr_ec == HSR_EC_IABT) - return false; - else if ((hsr & HSR_ISV) && !(hsr & HSR_WNR)) - return false; - else - return true; -} - -#endif /* __ARM_KVM_MMU_H__ */ diff --git a/trunk/arch/arm/include/asm/kvm_psci.h b/trunk/arch/arm/include/asm/kvm_psci.h deleted file mode 100644 index 9a83d98bf170..000000000000 --- a/trunk/arch/arm/include/asm/kvm_psci.h +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright (C) 2012 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#ifndef __ARM_KVM_PSCI_H__ -#define __ARM_KVM_PSCI_H__ - -bool kvm_psci_call(struct kvm_vcpu *vcpu); - -#endif /* __ARM_KVM_PSCI_H__ */ diff --git a/trunk/arch/arm/include/asm/mach/arch.h b/trunk/arch/arm/include/asm/mach/arch.h index 308ad7d6f98b..917d4fcfd9b4 100644 --- a/trunk/arch/arm/include/asm/mach/arch.h +++ b/trunk/arch/arm/include/asm/mach/arch.h @@ -12,6 +12,7 @@ struct tag; struct meminfo; +struct sys_timer; struct pt_regs; struct smp_operations; #ifdef CONFIG_SMP @@ -47,7 +48,7 @@ struct machine_desc { void (*map_io)(void);/* IO mapping function */ void (*init_early)(void); void (*init_irq)(void); - void (*init_time)(void); + struct sys_timer *timer; /* system tick timer */ void (*init_machine)(void); void (*init_late)(void); #ifdef CONFIG_MULTI_IRQ_HANDLER diff --git a/trunk/arch/arm/include/asm/mach/irq.h b/trunk/arch/arm/include/asm/mach/irq.h index 18c883023339..15cb035309f7 100644 --- a/trunk/arch/arm/include/asm/mach/irq.h +++ b/trunk/arch/arm/include/asm/mach/irq.h @@ -22,7 +22,6 @@ extern int show_fiq_list(struct seq_file *, int); #ifdef CONFIG_MULTI_IRQ_HANDLER extern void (*handle_arch_irq)(struct pt_regs *); -extern void set_handle_irq(void (*handle_irq)(struct pt_regs *)); #endif /* diff --git a/trunk/arch/arm/include/asm/mach/time.h b/trunk/arch/arm/include/asm/mach/time.h index 90c12e1e695c..6ca945f534ab 100644 --- a/trunk/arch/arm/include/asm/mach/time.h +++ b/trunk/arch/arm/include/asm/mach/time.h @@ -10,6 +10,36 @@ #ifndef __ASM_ARM_MACH_TIME_H #define __ASM_ARM_MACH_TIME_H +/* + * This is our kernel timer structure. + * + * - init + * Initialise the kernels jiffy timer source, claim interrupt + * using setup_irq. This is called early on during initialisation + * while interrupts are still disabled on the local CPU. 
+ * - suspend + * Suspend the kernel jiffy timer source, if necessary. This + * is called with interrupts disabled, after all normal devices + * have been suspended. If no action is required, set this to + * NULL. + * - resume + * Resume the kernel jiffy timer source, if necessary. This + * is called with interrupts disabled before any normal devices + * are resumed. If no action is required, set this to NULL. + * - offset + * Return the timer offset in microseconds since the last timer + * interrupt. Note: this must take account of any unprocessed + * timer interrupt which may be pending. + */ +struct sys_timer { + void (*init)(void); + void (*suspend)(void); + void (*resume)(void); +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET + unsigned long (*offset)(void); +#endif +}; + extern void timer_tick(void); struct timespec; diff --git a/trunk/arch/arm/include/asm/memory.h b/trunk/arch/arm/include/asm/memory.h index 1c4df27f9332..73cf03aa981e 100644 --- a/trunk/arch/arm/include/asm/memory.h +++ b/trunk/arch/arm/include/asm/memory.h @@ -37,7 +37,7 @@ */ #define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(0x01000000)) -#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) +#define TASK_UNMAPPED_BASE (UL(CONFIG_PAGE_OFFSET) / 3) /* * The maximum size of a 26-bit user space task. diff --git a/trunk/arch/arm/include/asm/pgtable-3level-hwdef.h b/trunk/arch/arm/include/asm/pgtable-3level-hwdef.h index 18f5cef82ad5..d7952824c5c4 100644 --- a/trunk/arch/arm/include/asm/pgtable-3level-hwdef.h +++ b/trunk/arch/arm/include/asm/pgtable-3level-hwdef.h @@ -32,9 +32,6 @@ #define PMD_TYPE_SECT (_AT(pmdval_t, 1) << 0) #define PMD_BIT4 (_AT(pmdval_t, 0)) #define PMD_DOMAIN(x) (_AT(pmdval_t, 0)) -#define PMD_APTABLE_SHIFT (61) -#define PMD_APTABLE (_AT(pgdval_t, 3) << PGD_APTABLE_SHIFT) -#define PMD_PXNTABLE (_AT(pgdval_t, 1) << 59) /* * - section @@ -44,11 +41,9 @@ #define PMD_SECT_S (_AT(pmdval_t, 3) << 8) #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10) #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11) -#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 53) #define PMD_SECT_XN (_AT(pmdval_t, 1) << 54) #define PMD_SECT_AP_WRITE (_AT(pmdval_t, 0)) #define PMD_SECT_AP_READ (_AT(pmdval_t, 0)) -#define PMD_SECT_AP1 (_AT(pmdval_t, 1) << 6) #define PMD_SECT_TEX(x) (_AT(pmdval_t, 0)) /* diff --git a/trunk/arch/arm/include/asm/pgtable-3level.h b/trunk/arch/arm/include/asm/pgtable-3level.h index 6ef8afd1b64c..a3f37929940a 100644 --- a/trunk/arch/arm/include/asm/pgtable-3level.h +++ b/trunk/arch/arm/include/asm/pgtable-3level.h @@ -104,29 +104,11 @@ */ #define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */ -/* - * 2nd stage PTE definitions for LPAE. - */ -#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */ -#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */ -#define L_PTE_S2_RDWR (_AT(pteval_t, 2) << 6) /* HAP[2:1] */ - -/* - * Hyp-mode PL2 PTE definitions for LPAE. 
- */ -#define L_PTE_HYP L_PTE_USER - #ifndef __ASSEMBLY__ #define pud_none(pud) (!pud_val(pud)) #define pud_bad(pud) (!(pud_val(pud) & 2)) #define pud_present(pud) (pud_val(pud)) -#define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ - PMD_TYPE_TABLE) -#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ - PMD_TYPE_SECT) #define pud_clear(pudp) \ do { \ diff --git a/trunk/arch/arm/include/asm/pgtable.h b/trunk/arch/arm/include/asm/pgtable.h index f30ac3b55ba9..9c82f988c0e3 100644 --- a/trunk/arch/arm/include/asm/pgtable.h +++ b/trunk/arch/arm/include/asm/pgtable.h @@ -70,9 +70,6 @@ extern void __pgd_error(const char *file, int line, pgd_t); extern pgprot_t pgprot_user; extern pgprot_t pgprot_kernel; -extern pgprot_t pgprot_hyp_device; -extern pgprot_t pgprot_s2; -extern pgprot_t pgprot_s2_device; #define _MOD_PROT(p, b) __pgprot(pgprot_val(p) | (b)) @@ -85,10 +82,6 @@ extern pgprot_t pgprot_s2_device; #define PAGE_READONLY_EXEC _MOD_PROT(pgprot_user, L_PTE_USER | L_PTE_RDONLY) #define PAGE_KERNEL _MOD_PROT(pgprot_kernel, L_PTE_XN) #define PAGE_KERNEL_EXEC pgprot_kernel -#define PAGE_HYP _MOD_PROT(pgprot_kernel, L_PTE_HYP) -#define PAGE_HYP_DEVICE _MOD_PROT(pgprot_hyp_device, L_PTE_HYP) -#define PAGE_S2 _MOD_PROT(pgprot_s2, L_PTE_S2_RDONLY) -#define PAGE_S2_DEVICE _MOD_PROT(pgprot_s2_device, L_PTE_USER | L_PTE_S2_RDONLY) #define __PAGE_NONE __pgprot(_L_PTE_DEFAULT | L_PTE_RDONLY | L_PTE_XN | L_PTE_NONE) #define __PAGE_SHARED __pgprot(_L_PTE_DEFAULT | L_PTE_USER | L_PTE_XN) diff --git a/trunk/arch/arm/include/uapi/asm/kvm.h b/trunk/arch/arm/include/uapi/asm/kvm.h deleted file mode 100644 index 3303ff5adbf3..000000000000 --- a/trunk/arch/arm/include/uapi/asm/kvm.h +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ - -#ifndef __ARM_KVM_H__ -#define __ARM_KVM_H__ - -#include -#include - -#define __KVM_HAVE_GUEST_DEBUG -#define __KVM_HAVE_IRQ_LINE - -#define KVM_REG_SIZE(id) \ - (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) - -/* Valid for svc_regs, abt_regs, und_regs, irq_regs in struct kvm_regs */ -#define KVM_ARM_SVC_sp svc_regs[0] -#define KVM_ARM_SVC_lr svc_regs[1] -#define KVM_ARM_SVC_spsr svc_regs[2] -#define KVM_ARM_ABT_sp abt_regs[0] -#define KVM_ARM_ABT_lr abt_regs[1] -#define KVM_ARM_ABT_spsr abt_regs[2] -#define KVM_ARM_UND_sp und_regs[0] -#define KVM_ARM_UND_lr und_regs[1] -#define KVM_ARM_UND_spsr und_regs[2] -#define KVM_ARM_IRQ_sp irq_regs[0] -#define KVM_ARM_IRQ_lr irq_regs[1] -#define KVM_ARM_IRQ_spsr irq_regs[2] - -/* Valid only for fiq_regs in struct kvm_regs */ -#define KVM_ARM_FIQ_r8 fiq_regs[0] -#define KVM_ARM_FIQ_r9 fiq_regs[1] -#define KVM_ARM_FIQ_r10 fiq_regs[2] -#define KVM_ARM_FIQ_fp fiq_regs[3] -#define KVM_ARM_FIQ_ip fiq_regs[4] -#define KVM_ARM_FIQ_sp fiq_regs[5] -#define KVM_ARM_FIQ_lr fiq_regs[6] -#define KVM_ARM_FIQ_spsr fiq_regs[7] - -struct kvm_regs { - struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */ - __u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */ - __u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */ - __u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */ - __u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */ - __u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */ -}; - -/* Supported Processor Types */ -#define KVM_ARM_TARGET_CORTEX_A15 0 -#define KVM_ARM_NUM_TARGETS 1 - -#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */ - -struct kvm_vcpu_init { - __u32 target; - __u32 features[7]; -}; - -struct kvm_sregs { -}; - -struct kvm_fpu { -}; - -struct kvm_guest_debug_arch { -}; - -struct kvm_debug_exit_arch { -}; - -struct kvm_sync_regs { -}; - -struct kvm_arch_memory_slot { -}; - -/* If you need to interpret the index values, here is the key: */ -#define KVM_REG_ARM_COPROC_MASK 0x000000000FFF0000 -#define KVM_REG_ARM_COPROC_SHIFT 16 -#define KVM_REG_ARM_32_OPC2_MASK 0x0000000000000007 -#define KVM_REG_ARM_32_OPC2_SHIFT 0 -#define KVM_REG_ARM_OPC1_MASK 0x0000000000000078 -#define KVM_REG_ARM_OPC1_SHIFT 3 -#define KVM_REG_ARM_CRM_MASK 0x0000000000000780 -#define KVM_REG_ARM_CRM_SHIFT 7 -#define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 -#define KVM_REG_ARM_32_CRN_SHIFT 11 - -/* Normal registers are mapped as coprocessor 16. */ -#define KVM_REG_ARM_CORE (0x0010 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_CORE_REG(name) (offsetof(struct kvm_regs, name) / 4) - -/* Some registers need more space to represent values. */ -#define KVM_REG_ARM_DEMUX (0x0011 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_DEMUX_ID_MASK 0x000000000000FF00 -#define KVM_REG_ARM_DEMUX_ID_SHIFT 8 -#define KVM_REG_ARM_DEMUX_ID_CCSIDR (0x00 << KVM_REG_ARM_DEMUX_ID_SHIFT) -#define KVM_REG_ARM_DEMUX_VAL_MASK 0x00000000000000FF -#define KVM_REG_ARM_DEMUX_VAL_SHIFT 0 - -/* VFP registers: we could overload CP10 like ARM does, but that's ugly. 
*/ -#define KVM_REG_ARM_VFP (0x0012 << KVM_REG_ARM_COPROC_SHIFT) -#define KVM_REG_ARM_VFP_MASK 0x000000000000FFFF -#define KVM_REG_ARM_VFP_BASE_REG 0x0 -#define KVM_REG_ARM_VFP_FPSID 0x1000 -#define KVM_REG_ARM_VFP_FPSCR 0x1001 -#define KVM_REG_ARM_VFP_MVFR1 0x1006 -#define KVM_REG_ARM_VFP_MVFR0 0x1007 -#define KVM_REG_ARM_VFP_FPEXC 0x1008 -#define KVM_REG_ARM_VFP_FPINST 0x1009 -#define KVM_REG_ARM_VFP_FPINST2 0x100A - - -/* KVM_IRQ_LINE irq field index values */ -#define KVM_ARM_IRQ_TYPE_SHIFT 24 -#define KVM_ARM_IRQ_TYPE_MASK 0xff -#define KVM_ARM_IRQ_VCPU_SHIFT 16 -#define KVM_ARM_IRQ_VCPU_MASK 0xff -#define KVM_ARM_IRQ_NUM_SHIFT 0 -#define KVM_ARM_IRQ_NUM_MASK 0xffff - -/* irq_type field */ -#define KVM_ARM_IRQ_TYPE_CPU 0 -#define KVM_ARM_IRQ_TYPE_SPI 1 -#define KVM_ARM_IRQ_TYPE_PPI 2 - -/* out-of-kernel GIC cpu interrupt injection irq_number field */ -#define KVM_ARM_IRQ_CPU_IRQ 0 -#define KVM_ARM_IRQ_CPU_FIQ 1 - -/* Highest supported SPI, from VGIC_NR_IRQS */ -#define KVM_ARM_IRQ_GIC_MAX 127 - -/* PSCI interface */ -#define KVM_PSCI_FN_BASE 0x95c1ba5e -#define KVM_PSCI_FN(n) (KVM_PSCI_FN_BASE + (n)) - -#define KVM_PSCI_FN_CPU_SUSPEND KVM_PSCI_FN(0) -#define KVM_PSCI_FN_CPU_OFF KVM_PSCI_FN(1) -#define KVM_PSCI_FN_CPU_ON KVM_PSCI_FN(2) -#define KVM_PSCI_FN_MIGRATE KVM_PSCI_FN(3) - -#define KVM_PSCI_RET_SUCCESS 0 -#define KVM_PSCI_RET_NI ((unsigned long)-1) -#define KVM_PSCI_RET_INVAL ((unsigned long)-2) -#define KVM_PSCI_RET_DENIED ((unsigned long)-3) - -#endif /* __ARM_KVM_H__ */ diff --git a/trunk/arch/arm/kernel/asm-offsets.c b/trunk/arch/arm/kernel/asm-offsets.c index c8b3272dfed1..c985b481192c 100644 --- a/trunk/arch/arm/kernel/asm-offsets.c +++ b/trunk/arch/arm/kernel/asm-offsets.c @@ -13,9 +13,6 @@ #include #include #include -#ifdef CONFIG_KVM_ARM_HOST -#include -#endif #include #include #include @@ -149,27 +146,5 @@ int main(void) DEFINE(DMA_BIDIRECTIONAL, DMA_BIDIRECTIONAL); DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE); DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE); -#ifdef CONFIG_KVM_ARM_HOST - DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm)); - DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr)); - DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15)); - DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest)); - DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.vfp_host)); - DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs)); - DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs)); - DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs)); - DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs)); - DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs)); - DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs)); - DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs)); - DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc)); - DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr)); - DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines)); - DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr)); - DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar)); - DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar)); - DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc)); - DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr)); -#endif return 0; } diff --git a/trunk/arch/arm/kernel/irq.c b/trunk/arch/arm/kernel/irq.c index 8e4ef4c83a74..896165096d6a 100644 --- a/trunk/arch/arm/kernel/irq.c +++ b/trunk/arch/arm/kernel/irq.c @@ -117,16 
+117,6 @@ void __init init_IRQ(void) machine_desc->init_irq(); } -#ifdef CONFIG_MULTI_IRQ_HANDLER -void __init set_handle_irq(void (*handle_irq)(struct pt_regs *)) -{ - if (handle_arch_irq) - return; - - handle_arch_irq = handle_irq; -} -#endif - #ifdef CONFIG_SPARSE_IRQ int __init arch_probe_nr_irqs(void) { diff --git a/trunk/arch/arm/kernel/perf_event.c b/trunk/arch/arm/kernel/perf_event.c index 31e0eb353cd8..f9e8657dd241 100644 --- a/trunk/arch/arm/kernel/perf_event.c +++ b/trunk/arch/arm/kernel/perf_event.c @@ -149,6 +149,12 @@ u64 armpmu_event_update(struct perf_event *event) static void armpmu_read(struct perf_event *event) { + struct hw_perf_event *hwc = &event->hw; + + /* Don't read disabled counters! */ + if (hwc->idx < 0) + return; + armpmu_event_update(event); } @@ -201,6 +207,8 @@ armpmu_del(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; int idx = hwc->idx; + WARN_ON(idx < 0); + armpmu_stop(event, PERF_EF_UPDATE); hw_events->events[idx] = NULL; clear_bit(idx, hw_events->used_mask); @@ -350,7 +358,7 @@ __hw_perf_event_init(struct perf_event *event) { struct arm_pmu *armpmu = to_arm_pmu(event->pmu); struct hw_perf_event *hwc = &event->hw; - int mapping; + int mapping, err; mapping = armpmu->map_event(event); @@ -399,12 +407,14 @@ __hw_perf_event_init(struct perf_event *event) local64_set(&hwc->period_left, hwc->sample_period); } + err = 0; if (event->group_leader != event) { - if (validate_group(event) != 0); + err = validate_group(event); + if (err) return -EINVAL; } - return 0; + return err; } static int armpmu_event_init(struct perf_event *event) diff --git a/trunk/arch/arm/kernel/perf_event_cpu.c b/trunk/arch/arm/kernel/perf_event_cpu.c index 1f2740e3dbc0..5f6620684e25 100644 --- a/trunk/arch/arm/kernel/perf_event_cpu.c +++ b/trunk/arch/arm/kernel/perf_event_cpu.c @@ -147,7 +147,7 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) cpu_pmu->free_irq = cpu_pmu_free_irq; /* Ensure the PMU has sane values out of reset. */ - if (cpu_pmu->reset) + if (cpu_pmu && cpu_pmu->reset) on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); } @@ -201,46 +201,48 @@ static struct platform_device_id cpu_pmu_plat_device_ids[] = { static int probe_current_pmu(struct arm_pmu *pmu) { int cpu = get_cpu(); - unsigned long implementor = read_cpuid_implementor(); - unsigned long part_number = read_cpuid_part_number(); + unsigned long cpuid = read_cpuid_id(); + unsigned long implementor = (cpuid & 0xFF000000) >> 24; + unsigned long part_number = (cpuid & 0xFFF0); int ret = -ENODEV; pr_info("probing PMU on CPU %d\n", cpu); /* ARM Ltd CPUs. */ - if (implementor == ARM_CPU_IMP_ARM) { + if (0x41 == implementor) { switch (part_number) { - case ARM_CPU_PART_ARM1136: - case ARM_CPU_PART_ARM1156: - case ARM_CPU_PART_ARM1176: + case 0xB360: /* ARM1136 */ + case 0xB560: /* ARM1156 */ + case 0xB760: /* ARM1176 */ ret = armv6pmu_init(pmu); break; - case ARM_CPU_PART_ARM11MPCORE: + case 0xB020: /* ARM11mpcore */ ret = armv6mpcore_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A8: + case 0xC080: /* Cortex-A8 */ ret = armv7_a8_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A9: + case 0xC090: /* Cortex-A9 */ ret = armv7_a9_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A5: + case 0xC050: /* Cortex-A5 */ ret = armv7_a5_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A15: + case 0xC0F0: /* Cortex-A15 */ ret = armv7_a15_pmu_init(pmu); break; - case ARM_CPU_PART_CORTEX_A7: + case 0xC070: /* Cortex-A7 */ ret = armv7_a7_pmu_init(pmu); break; } /* Intel CPUs [xscale]. 
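The probe_current_pmu() hunk above replaces the read_cpuid_implementor()/read_cpuid_part_number() helpers with an open-coded decode of the MIDR word returned by read_cpuid_id(): the implementor sits in bits [31:24], and the primary part number is kept in place behind a 0xFFF0 mask so it matches the 0xC0F0-style switch cases. A minimal stand-alone sketch of that decode; the helper name and the sample MIDR value are illustrative only.

#include <stdio.h>

/* Decode a MIDR/CPUID word the way the hunk above does. */
static void decode_midr(unsigned long cpuid)
{
	unsigned long implementor = (cpuid & 0xFF000000) >> 24;	/* 0x41 = ARM Ltd, 0x69 = Intel */
	unsigned long part_number = (cpuid & 0xFFF0);		/* e.g. 0xC0F0 = Cortex-A15 */

	printf("implementor=0x%02lx part=0x%04lx\n", implementor, part_number);
}

int main(void)
{
	decode_midr(0x412FC0F0);	/* hypothetical Cortex-A15 MIDR, for illustration */
	return 0;
}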
*/ - } else if (implementor == ARM_CPU_IMP_INTEL) { - switch (xscale_cpu_arch_version()) { - case ARM_CPU_XSCALE_ARCH_V1: + } else if (0x69 == implementor) { + part_number = (cpuid >> 13) & 0x7; + switch (part_number) { + case 1: ret = xscale1pmu_init(pmu); break; - case ARM_CPU_XSCALE_ARCH_V2: + case 2: ret = xscale2pmu_init(pmu); break; } @@ -277,22 +279,17 @@ static int cpu_pmu_device_probe(struct platform_device *pdev) } if (ret) { - pr_info("failed to probe PMU!"); - goto out_free; + pr_info("failed to register PMU devices!"); + kfree(pmu); + return ret; } cpu_pmu = pmu; cpu_pmu->plat_device = pdev; cpu_pmu_init(cpu_pmu); - ret = armpmu_register(cpu_pmu, PERF_TYPE_RAW); - - if (!ret) - return 0; + armpmu_register(cpu_pmu, PERF_TYPE_RAW); -out_free: - pr_info("failed to register PMU devices!"); - kfree(pmu); - return ret; + return 0; } static struct platform_driver cpu_pmu_driver = { diff --git a/trunk/arch/arm/kernel/perf_event_v6.c b/trunk/arch/arm/kernel/perf_event_v6.c index 03664b0e8fa4..041d0526a288 100644 --- a/trunk/arch/arm/kernel/perf_event_v6.c +++ b/trunk/arch/arm/kernel/perf_event_v6.c @@ -106,7 +106,7 @@ static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -259,7 +259,7 @@ static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/trunk/arch/arm/kernel/perf_event_v7.c b/trunk/arch/arm/kernel/perf_event_v7.c index 8c79a9e70b83..4fbc757d9cff 100644 --- a/trunk/arch/arm/kernel/perf_event_v7.c +++ b/trunk/arch/arm/kernel/perf_event_v7.c @@ -157,8 +157,8 @@ static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -282,7 +282,7 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, @@ -399,8 +399,8 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, /* * The prefetch counters don't differentiate between the I @@ -527,8 +527,8 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = 
CACHE_OP_UNSUPPORTED, @@ -651,8 +651,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_WRITE)] = { - [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/trunk/arch/arm/kernel/perf_event_xscale.c b/trunk/arch/arm/kernel/perf_event_xscale.c index 63990c42fac9..2b0fe30ec12e 100644 --- a/trunk/arch/arm/kernel/perf_event_xscale.c +++ b/trunk/arch/arm/kernel/perf_event_xscale.c @@ -83,7 +83,7 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, [C(OP_WRITE)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, - [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS, }, [C(OP_PREFETCH)] = { [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, diff --git a/trunk/arch/arm/kernel/smp.c b/trunk/arch/arm/kernel/smp.c index a77b0532f97e..b7e3b506219b 100644 --- a/trunk/arch/arm/kernel/smp.c +++ b/trunk/arch/arm/kernel/smp.c @@ -416,8 +416,7 @@ static void (*smp_cross_call)(const struct cpumask *, unsigned int); void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int)) { - if (!smp_cross_call) - smp_cross_call = fn; + smp_cross_call = fn; } void arch_send_call_function_ipi_mask(const struct cpumask *mask) diff --git a/trunk/arch/arm/kernel/smp_twd.c b/trunk/arch/arm/kernel/smp_twd.c index dc9bb0146665..49f335d301ba 100644 --- a/trunk/arch/arm/kernel/smp_twd.c +++ b/trunk/arch/arm/kernel/smp_twd.c @@ -24,6 +24,7 @@ #include #include +#include /* set up by the platform code */ static void __iomem *twd_base; diff --git a/trunk/arch/arm/kernel/time.c b/trunk/arch/arm/kernel/time.c index 955d92d265e5..09be0c3c9069 100644 --- a/trunk/arch/arm/kernel/time.c +++ b/trunk/arch/arm/kernel/time.c @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -30,6 +31,11 @@ #include #include +/* + * Our system timer. + */ +static struct sys_timer *system_timer; + #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \ defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) /* this needs a better home */ @@ -63,6 +69,16 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); #endif +#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET +u32 arch_gettimeoffset(void) +{ + if (system_timer->offset != NULL) + return system_timer->offset() * 1000; + + return 0; +} +#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */ + #ifndef CONFIG_GENERIC_CLOCKEVENTS /* * Kernel system timer support. 
@@ -113,8 +129,43 @@ int __init register_persistent_clock(clock_access_fn read_boot, return -EINVAL; } +#if defined(CONFIG_PM) && !defined(CONFIG_GENERIC_CLOCKEVENTS) +static int timer_suspend(void) +{ + if (system_timer->suspend) + system_timer->suspend(); + + return 0; +} + +static void timer_resume(void) +{ + if (system_timer->resume) + system_timer->resume(); +} +#else +#define timer_suspend NULL +#define timer_resume NULL +#endif + +static struct syscore_ops timer_syscore_ops = { + .suspend = timer_suspend, + .resume = timer_resume, +}; + +static int __init timer_init_syscore_ops(void) +{ + register_syscore_ops(&timer_syscore_ops); + + return 0; +} + +device_initcall(timer_init_syscore_ops); + void __init time_init(void) { - machine_desc->init_time(); + system_timer = machine_desc->timer; + system_timer->init(); sched_clock_postinit(); } + diff --git a/trunk/arch/arm/kernel/vmlinux.lds.S b/trunk/arch/arm/kernel/vmlinux.lds.S index b571484e9f03..11c1785bf63e 100644 --- a/trunk/arch/arm/kernel/vmlinux.lds.S +++ b/trunk/arch/arm/kernel/vmlinux.lds.S @@ -19,11 +19,7 @@ ALIGN_FUNCTION(); \ VMLINUX_SYMBOL(__idmap_text_start) = .; \ *(.idmap.text) \ - VMLINUX_SYMBOL(__idmap_text_end) = .; \ - ALIGN_FUNCTION(); \ - VMLINUX_SYMBOL(__hyp_idmap_text_start) = .; \ - *(.hyp.idmap.text) \ - VMLINUX_SYMBOL(__hyp_idmap_text_end) = .; + VMLINUX_SYMBOL(__idmap_text_end) = .; #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x) diff --git a/trunk/arch/arm/kvm/Kconfig b/trunk/arch/arm/kvm/Kconfig deleted file mode 100644 index 05227cb57a7b..000000000000 --- a/trunk/arch/arm/kvm/Kconfig +++ /dev/null @@ -1,56 +0,0 @@ -# -# KVM configuration -# - -source "virt/kvm/Kconfig" - -menuconfig VIRTUALIZATION - bool "Virtualization" - ---help--- - Say Y here to get to see options for using your Linux host to run - other operating systems inside virtual machines (guests). - This option alone does not add any kernel code. - - If you say N, all options in this submenu will be skipped and - disabled. - -if VIRTUALIZATION - -config KVM - bool "Kernel-based Virtual Machine (KVM) support" - select PREEMPT_NOTIFIERS - select ANON_INODES - select KVM_MMIO - select KVM_ARM_HOST - depends on ARM_VIRT_EXT && ARM_LPAE - ---help--- - Support hosting virtualized guest machines. You will also - need to select one or more of the processor modules below. - - This module provides access to the hardware capabilities through - a character device node named /dev/kvm. - - If unsure, say N. - -config KVM_ARM_HOST - bool "KVM host support for ARM cpus." - depends on KVM - depends on MMU - select MMU_NOTIFIER - ---help--- - Provides host support for ARM processors. - -config KVM_ARM_MAX_VCPUS - int "Number maximum supported virtual CPUs per VM" - depends on KVM_ARM_HOST - default 4 - help - Static number of max supported virtual CPUs per VM. - - If you choose a high number, the vcpu structures will be quite - large, so only choose a reasonable number that you expect to - actually use. - -source drivers/virtio/Kconfig - -endif # VIRTUALIZATION diff --git a/trunk/arch/arm/kvm/Makefile b/trunk/arch/arm/kvm/Makefile deleted file mode 100644 index ea27987bd07f..000000000000 --- a/trunk/arch/arm/kvm/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -# -# Makefile for Kernel-based Virtual Machine module -# - -plus_virt := $(call as-instr,.arch_extension virt,+virt) -ifeq ($(plus_virt),+virt) - plus_virt_def := -DREQUIRES_VIRT=1 -endif - -ccflags-y += -Ivirt/kvm -Iarch/arm/kvm -CFLAGS_arm.o := -I. $(plus_virt_def) -CFLAGS_mmu.o := -I. 
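The arch/arm/kernel/time.c hunks above reintroduce a global struct sys_timer whose init(), offset(), suspend() and resume() callbacks are consumed by time_init(), arch_gettimeoffset() and the timer syscore ops. A sketch of what a platform-side descriptor could look like under that interface; the myplat_* names are placeholders, and the field layout is assumed from the callbacks dereferenced above (offset() returning microseconds since the last tick, multiplied by 1000 in arch_gettimeoffset()).

#include <asm/mach/time.h>	/* struct sys_timer, in this pre-3.9 layout */

static void myplat_timer_init(void)
{
	/* program the timer hardware and register clock events here */
}

static unsigned long myplat_gettimeoffset(void)
{
	return 0;	/* microseconds elapsed since the last timer tick */
}

static void myplat_timer_suspend(void) { }
static void myplat_timer_resume(void) { }

struct sys_timer myplat_timer = {
	.init		= myplat_timer_init,
	.offset		= myplat_gettimeoffset,	/* only used with CONFIG_ARCH_USES_GETTIMEOFFSET */
	.suspend	= myplat_timer_suspend,
	.resume		= myplat_timer_resume,
};

The machine descriptor's .timer field would then point at this structure, which is what machine_desc->timer in time_init() above picks up.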
- -AFLAGS_init.o := -Wa,-march=armv7-a$(plus_virt) -AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt) - -kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) - -obj-y += kvm-arm.o init.o interrupts.o -obj-y += arm.o guest.o mmu.o emulate.o reset.o -obj-y += coproc.o coproc_a15.o mmio.o psci.o diff --git a/trunk/arch/arm/kvm/arm.c b/trunk/arch/arm/kvm/arm.c deleted file mode 100644 index 2d30e3afdaf9..000000000000 --- a/trunk/arch/arm/kvm/arm.c +++ /dev/null @@ -1,1015 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define CREATE_TRACE_POINTS -#include "trace.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#ifdef REQUIRES_VIRT -__asm__(".arch_extension virt"); -#endif - -static DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page); -static struct vfp_hard_struct __percpu *kvm_host_vfp_state; -static unsigned long hyp_default_vectors; - -/* The VMID used in the VTTBR */ -static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1); -static u8 kvm_next_vmid; -static DEFINE_SPINLOCK(kvm_vmid_lock); - -int kvm_arch_hardware_enable(void *garbage) -{ - return 0; -} - -int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) -{ - return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE; -} - -void kvm_arch_hardware_disable(void *garbage) -{ -} - -int kvm_arch_hardware_setup(void) -{ - return 0; -} - -void kvm_arch_hardware_unsetup(void) -{ -} - -void kvm_arch_check_processor_compat(void *rtn) -{ - *(int *)rtn = 0; -} - -void kvm_arch_sync_events(struct kvm *kvm) -{ -} - -/** - * kvm_arch_init_vm - initializes a VM data structure - * @kvm: pointer to the KVM struct - */ -int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) -{ - int ret = 0; - - if (type) - return -EINVAL; - - ret = kvm_alloc_stage2_pgd(kvm); - if (ret) - goto out_fail_alloc; - - ret = create_hyp_mappings(kvm, kvm + 1); - if (ret) - goto out_free_stage2_pgd; - - /* Mark the initial VMID generation invalid */ - kvm->arch.vmid_gen = 0; - - return ret; -out_free_stage2_pgd: - kvm_free_stage2_pgd(kvm); -out_fail_alloc: - return ret; -} - -int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) -{ - return VM_FAULT_SIGBUS; -} - -void kvm_arch_free_memslot(struct kvm_memory_slot *free, - struct kvm_memory_slot *dont) -{ -} - -int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages) -{ - return 0; -} - -/** - * kvm_arch_destroy_vm - destroy the VM data structure - * @kvm: pointer to the KVM struct - */ -void kvm_arch_destroy_vm(struct kvm *kvm) -{ - int i; - - kvm_free_stage2_pgd(kvm); - - for (i = 0; i < KVM_MAX_VCPUS; ++i) { - if (kvm->vcpus[i]) { - 
kvm_arch_vcpu_free(kvm->vcpus[i]); - kvm->vcpus[i] = NULL; - } - } -} - -int kvm_dev_ioctl_check_extension(long ext) -{ - int r; - switch (ext) { - case KVM_CAP_USER_MEMORY: - case KVM_CAP_SYNC_MMU: - case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: - case KVM_CAP_ONE_REG: - case KVM_CAP_ARM_PSCI: - r = 1; - break; - case KVM_CAP_COALESCED_MMIO: - r = KVM_COALESCED_MMIO_PAGE_OFFSET; - break; - case KVM_CAP_NR_VCPUS: - r = num_online_cpus(); - break; - case KVM_CAP_MAX_VCPUS: - r = KVM_MAX_VCPUS; - break; - default: - r = 0; - break; - } - return r; -} - -long kvm_arch_dev_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - return -EINVAL; -} - -int kvm_arch_set_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot old, - int user_alloc) -{ - return 0; -} - -int kvm_arch_prepare_memory_region(struct kvm *kvm, - struct kvm_memory_slot *memslot, - struct kvm_memory_slot old, - struct kvm_userspace_memory_region *mem, - int user_alloc) -{ - return 0; -} - -void kvm_arch_commit_memory_region(struct kvm *kvm, - struct kvm_userspace_memory_region *mem, - struct kvm_memory_slot old, - int user_alloc) -{ -} - -void kvm_arch_flush_shadow_all(struct kvm *kvm) -{ -} - -void kvm_arch_flush_shadow_memslot(struct kvm *kvm, - struct kvm_memory_slot *slot) -{ -} - -struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id) -{ - int err; - struct kvm_vcpu *vcpu; - - vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); - if (!vcpu) { - err = -ENOMEM; - goto out; - } - - err = kvm_vcpu_init(vcpu, kvm, id); - if (err) - goto free_vcpu; - - err = create_hyp_mappings(vcpu, vcpu + 1); - if (err) - goto vcpu_uninit; - - return vcpu; -vcpu_uninit: - kvm_vcpu_uninit(vcpu); -free_vcpu: - kmem_cache_free(kvm_vcpu_cache, vcpu); -out: - return ERR_PTR(err); -} - -int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) -{ - return 0; -} - -void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu) -{ - kvm_mmu_free_memory_caches(vcpu); - kmem_cache_free(kvm_vcpu_cache, vcpu); -} - -void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) -{ - kvm_arch_vcpu_free(vcpu); -} - -int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) -{ - return 0; -} - -int __attribute_const__ kvm_target_cpu(void) -{ - unsigned long implementor = read_cpuid_implementor(); - unsigned long part_number = read_cpuid_part_number(); - - if (implementor != ARM_CPU_IMP_ARM) - return -EINVAL; - - switch (part_number) { - case ARM_CPU_PART_CORTEX_A15: - return KVM_ARM_TARGET_CORTEX_A15; - default: - return -EINVAL; - } -} - -int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) -{ - /* Force users to call KVM_ARM_VCPU_INIT */ - vcpu->arch.target = -1; - return 0; -} - -void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) -{ -} - -void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) -{ - vcpu->cpu = cpu; - vcpu->arch.vfp_host = this_cpu_ptr(kvm_host_vfp_state); - - /* - * Check whether this vcpu requires the cache to be flushed on - * this physical CPU. This is a consequence of doing dcache - * operations by set/way on this vcpu. We do it here to be in - * a non-preemptible section. 
- */ - if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush)) - flush_cache_all(); /* We'd really want v7_flush_dcache_all() */ -} - -void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) -{ -} - -int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, - struct kvm_guest_debug *dbg) -{ - return -EINVAL; -} - - -int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, - struct kvm_mp_state *mp_state) -{ - return -EINVAL; -} - -/** - * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled - * @v: The VCPU pointer - * - * If the guest CPU is not waiting for interrupts or an interrupt line is - * asserted, the CPU is by definition runnable. - */ -int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) -{ - return !!v->arch.irq_lines; -} - -/* Just ensure a guest exit from a particular CPU */ -static void exit_vm_noop(void *info) -{ -} - -void force_vm_exit(const cpumask_t *mask) -{ - smp_call_function_many(mask, exit_vm_noop, NULL, true); -} - -/** - * need_new_vmid_gen - check that the VMID is still valid - * @kvm: The VM's VMID to checkt - * - * return true if there is a new generation of VMIDs being used - * - * The hardware supports only 256 values with the value zero reserved for the - * host, so we check if an assigned value belongs to a previous generation, - * which which requires us to assign a new value. If we're the first to use a - * VMID for the new generation, we must flush necessary caches and TLBs on all - * CPUs. - */ -static bool need_new_vmid_gen(struct kvm *kvm) -{ - return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen)); -} - -/** - * update_vttbr - Update the VTTBR with a valid VMID before the guest runs - * @kvm The guest that we are about to run - * - * Called from kvm_arch_vcpu_ioctl_run before entering the guest to ensure the - * VM has a valid VMID, otherwise assigns a new one and flushes corresponding - * caches and TLBs. - */ -static void update_vttbr(struct kvm *kvm) -{ - phys_addr_t pgd_phys; - u64 vmid; - - if (!need_new_vmid_gen(kvm)) - return; - - spin_lock(&kvm_vmid_lock); - - /* - * We need to re-check the vmid_gen here to ensure that if another vcpu - * already allocated a valid vmid for this vm, then this vcpu should - * use the same vmid. - */ - if (!need_new_vmid_gen(kvm)) { - spin_unlock(&kvm_vmid_lock); - return; - } - - /* First user of a new VMID generation? */ - if (unlikely(kvm_next_vmid == 0)) { - atomic64_inc(&kvm_vmid_gen); - kvm_next_vmid = 1; - - /* - * On SMP we know no other CPUs can use this CPU's or each - * other's VMID after force_vm_exit returns since the - * kvm_vmid_lock blocks them from reentry to the guest. - */ - force_vm_exit(cpu_all_mask); - /* - * Now broadcast TLB + ICACHE invalidation over the inner - * shareable domain to make sure all data structures are - * clean. 
- */ - kvm_call_hyp(__kvm_flush_vm_context); - } - - kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen); - kvm->arch.vmid = kvm_next_vmid; - kvm_next_vmid++; - - /* update vttbr to be used with the new vmid */ - pgd_phys = virt_to_phys(kvm->arch.pgd); - vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK; - kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK; - kvm->arch.vttbr |= vmid; - - spin_unlock(&kvm_vmid_lock); -} - -static int handle_svc_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* SVC called from Hyp mode should never get here */ - kvm_debug("SVC called from Hyp mode shouldn't go here\n"); - BUG(); - return -EINVAL; /* Squash warning */ -} - -static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - trace_kvm_hvc(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0), - vcpu->arch.hsr & HSR_HVC_IMM_MASK); - - if (kvm_psci_call(vcpu)) - return 1; - - kvm_inject_undefined(vcpu); - return 1; -} - -static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - if (kvm_psci_call(vcpu)) - return 1; - - kvm_inject_undefined(vcpu); - return 1; -} - -static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* The hypervisor should never cause aborts */ - kvm_err("Prefetch Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", - vcpu->arch.hxfar, vcpu->arch.hsr); - return -EFAULT; -} - -static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* This is either an error in the ws. code or an external abort */ - kvm_err("Data Abort taken from Hyp mode at %#08x (HSR: %#08x)\n", - vcpu->arch.hxfar, vcpu->arch.hsr); - return -EFAULT; -} - -typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *); -static exit_handle_fn arm_exit_handlers[] = { - [HSR_EC_WFI] = kvm_handle_wfi, - [HSR_EC_CP15_32] = kvm_handle_cp15_32, - [HSR_EC_CP15_64] = kvm_handle_cp15_64, - [HSR_EC_CP14_MR] = kvm_handle_cp14_access, - [HSR_EC_CP14_LS] = kvm_handle_cp14_load_store, - [HSR_EC_CP14_64] = kvm_handle_cp14_access, - [HSR_EC_CP_0_13] = kvm_handle_cp_0_13_access, - [HSR_EC_CP10_ID] = kvm_handle_cp10_id, - [HSR_EC_SVC_HYP] = handle_svc_hyp, - [HSR_EC_HVC] = handle_hvc, - [HSR_EC_SMC] = handle_smc, - [HSR_EC_IABT] = kvm_handle_guest_abort, - [HSR_EC_IABT_HYP] = handle_pabt_hyp, - [HSR_EC_DABT] = kvm_handle_guest_abort, - [HSR_EC_DABT_HYP] = handle_dabt_hyp, -}; - -/* - * A conditional instruction is allowed to trap, even though it - * wouldn't be executed. So let's re-implement the hardware, in - * software! - */ -static bool kvm_condition_valid(struct kvm_vcpu *vcpu) -{ - unsigned long cpsr, cond, insn; - - /* - * Exception Code 0 can only happen if we set HCR.TGE to 1, to - * catch undefined instructions, and then we won't get past - * the arm_exit_handlers test anyway. - */ - BUG_ON(((vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT) == 0); - - /* Top two bits non-zero? Unconditional. */ - if (vcpu->arch.hsr >> 30) - return true; - - cpsr = *vcpu_cpsr(vcpu); - - /* Is condition field valid? */ - if ((vcpu->arch.hsr & HSR_CV) >> HSR_CV_SHIFT) - cond = (vcpu->arch.hsr & HSR_COND) >> HSR_COND_SHIFT; - else { - /* This can happen in Thumb mode: examine IT state. */ - unsigned long it; - - it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3); - - /* it == 0 => unconditional. */ - if (it == 0) - return true; - - /* The cond for this insn works out as the top 4 bits. 
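update_vttbr() above folds an 8-bit VMID (the comment notes the hardware supports only 256 values, with zero reserved for the host) and the stage-2 pgd physical address into the 64-bit VTTBR with a shift-mask-or sequence. A stand-alone sketch of that composition; the field positions below are assumptions chosen for illustration, the kernel code uses the removed VTTBR_VMID_SHIFT/VTTBR_VMID_MASK/VTTBR_BADDR_MASK macros instead.

#include <stdint.h>

/* Assumed layout for illustration only: VMID in a high 8-bit field,
 * stage-2 pgd base address in the low bits. */
#define SKETCH_VMID_SHIFT	48
#define SKETCH_VMID_MASK	(0xffULL << SKETCH_VMID_SHIFT)
#define SKETCH_BADDR_MASK	((1ULL << SKETCH_VMID_SHIFT) - 1)

static uint64_t make_vttbr(uint64_t pgd_phys, uint8_t vmid)
{
	uint64_t v = ((uint64_t)vmid << SKETCH_VMID_SHIFT) & SKETCH_VMID_MASK;

	return (pgd_phys & SKETCH_BADDR_MASK) | v;
}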
*/ - cond = (it >> 4); - } - - /* Shift makes it look like an ARM-mode instruction */ - insn = cond << 28; - return arm_check_condition(insn, cpsr) != ARM_OPCODE_CONDTEST_FAIL; -} - -/* - * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on - * proper exit to QEMU. - */ -static int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, - int exception_index) -{ - unsigned long hsr_ec; - - switch (exception_index) { - case ARM_EXCEPTION_IRQ: - return 1; - case ARM_EXCEPTION_UNDEFINED: - kvm_err("Undefined exception in Hyp mode at: %#08x\n", - vcpu->arch.hyp_pc); - BUG(); - panic("KVM: Hypervisor undefined exception!\n"); - case ARM_EXCEPTION_DATA_ABORT: - case ARM_EXCEPTION_PREF_ABORT: - case ARM_EXCEPTION_HVC: - hsr_ec = (vcpu->arch.hsr & HSR_EC) >> HSR_EC_SHIFT; - - if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) - || !arm_exit_handlers[hsr_ec]) { - kvm_err("Unkown exception class: %#08lx, " - "hsr: %#08x\n", hsr_ec, - (unsigned int)vcpu->arch.hsr); - BUG(); - } - - /* - * See ARM ARM B1.14.1: "Hyp traps on instructions - * that fail their condition code check" - */ - if (!kvm_condition_valid(vcpu)) { - bool is_wide = vcpu->arch.hsr & HSR_IL; - kvm_skip_instr(vcpu, is_wide); - return 1; - } - - return arm_exit_handlers[hsr_ec](vcpu, run); - default: - kvm_pr_unimpl("Unsupported exception type: %d", - exception_index); - run->exit_reason = KVM_EXIT_INTERNAL_ERROR; - return 0; - } -} - -static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu) -{ - if (likely(vcpu->arch.has_run_once)) - return 0; - - vcpu->arch.has_run_once = true; - - /* - * Handle the "start in power-off" case by calling into the - * PSCI code. - */ - if (test_and_clear_bit(KVM_ARM_VCPU_POWER_OFF, vcpu->arch.features)) { - *vcpu_reg(vcpu, 0) = KVM_PSCI_FN_CPU_OFF; - kvm_psci_call(vcpu); - } - - return 0; -} - -static void vcpu_pause(struct kvm_vcpu *vcpu) -{ - wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); - - wait_event_interruptible(*wq, !vcpu->arch.pause); -} - -/** - * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code - * @vcpu: The VCPU pointer - * @run: The kvm_run structure pointer used for userspace state exchange - * - * This function is called through the VCPU_RUN ioctl called from user space. It - * will execute VM code in a loop until the time slice for the process is used - * or some emulation is needed from user space in which case the function will - * return with return value 0 and with the kvm_run structure filled in with the - * required data for the requested emulation. 
- */ -int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - int ret; - sigset_t sigsaved; - - /* Make sure they initialize the vcpu with KVM_ARM_VCPU_INIT */ - if (unlikely(vcpu->arch.target < 0)) - return -ENOEXEC; - - ret = kvm_vcpu_first_run_init(vcpu); - if (ret) - return ret; - - if (run->exit_reason == KVM_EXIT_MMIO) { - ret = kvm_handle_mmio_return(vcpu, vcpu->run); - if (ret) - return ret; - } - - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); - - ret = 1; - run->exit_reason = KVM_EXIT_UNKNOWN; - while (ret > 0) { - /* - * Check conditions before entering the guest - */ - cond_resched(); - - update_vttbr(vcpu->kvm); - - if (vcpu->arch.pause) - vcpu_pause(vcpu); - - local_irq_disable(); - - /* - * Re-check atomic conditions - */ - if (signal_pending(current)) { - ret = -EINTR; - run->exit_reason = KVM_EXIT_INTR; - } - - if (ret <= 0 || need_new_vmid_gen(vcpu->kvm)) { - local_irq_enable(); - continue; - } - - /************************************************************** - * Enter the guest - */ - trace_kvm_entry(*vcpu_pc(vcpu)); - kvm_guest_enter(); - vcpu->mode = IN_GUEST_MODE; - - ret = kvm_call_hyp(__kvm_vcpu_run, vcpu); - - vcpu->mode = OUTSIDE_GUEST_MODE; - vcpu->arch.last_pcpu = smp_processor_id(); - kvm_guest_exit(); - trace_kvm_exit(*vcpu_pc(vcpu)); - /* - * We may have taken a host interrupt in HYP mode (ie - * while executing the guest). This interrupt is still - * pending, as we haven't serviced it yet! - * - * We're now back in SVC mode, with interrupts - * disabled. Enabling the interrupts now will have - * the effect of taking the interrupt again, in SVC - * mode this time. - */ - local_irq_enable(); - - /* - * Back from guest - *************************************************************/ - - ret = handle_exit(vcpu, run, ret); - } - - if (vcpu->sigset_active) - sigprocmask(SIG_SETMASK, &sigsaved, NULL); - return ret; -} - -static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level) -{ - int bit_index; - bool set; - unsigned long *ptr; - - if (number == KVM_ARM_IRQ_CPU_IRQ) - bit_index = __ffs(HCR_VI); - else /* KVM_ARM_IRQ_CPU_FIQ */ - bit_index = __ffs(HCR_VF); - - ptr = (unsigned long *)&vcpu->arch.irq_lines; - if (level) - set = test_and_set_bit(bit_index, ptr); - else - set = test_and_clear_bit(bit_index, ptr); - - /* - * If we didn't change anything, no need to wake up or kick other CPUs - */ - if (set == level) - return 0; - - /* - * The vcpu irq_lines field was updated, wake up sleeping VCPUs and - * trigger a world-switch round on the running physical CPU to set the - * virtual IRQ/FIQ fields in the HCR appropriately. 
- */ - kvm_vcpu_kick(vcpu); - - return 0; -} - -int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level) -{ - u32 irq = irq_level->irq; - unsigned int irq_type, vcpu_idx, irq_num; - int nrcpus = atomic_read(&kvm->online_vcpus); - struct kvm_vcpu *vcpu = NULL; - bool level = irq_level->level; - - irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK; - vcpu_idx = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK; - irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK; - - trace_kvm_irq_line(irq_type, vcpu_idx, irq_num, irq_level->level); - - if (irq_type != KVM_ARM_IRQ_TYPE_CPU) - return -EINVAL; - - if (vcpu_idx >= nrcpus) - return -EINVAL; - - vcpu = kvm_get_vcpu(kvm, vcpu_idx); - if (!vcpu) - return -EINVAL; - - if (irq_num > KVM_ARM_IRQ_CPU_FIQ) - return -EINVAL; - - return vcpu_interrupt_line(vcpu, irq_num, level); -} - -long kvm_arch_vcpu_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - struct kvm_vcpu *vcpu = filp->private_data; - void __user *argp = (void __user *)arg; - - switch (ioctl) { - case KVM_ARM_VCPU_INIT: { - struct kvm_vcpu_init init; - - if (copy_from_user(&init, argp, sizeof(init))) - return -EFAULT; - - return kvm_vcpu_set_target(vcpu, &init); - - } - case KVM_SET_ONE_REG: - case KVM_GET_ONE_REG: { - struct kvm_one_reg reg; - if (copy_from_user(®, argp, sizeof(reg))) - return -EFAULT; - if (ioctl == KVM_SET_ONE_REG) - return kvm_arm_set_reg(vcpu, ®); - else - return kvm_arm_get_reg(vcpu, ®); - } - case KVM_GET_REG_LIST: { - struct kvm_reg_list __user *user_list = argp; - struct kvm_reg_list reg_list; - unsigned n; - - if (copy_from_user(®_list, user_list, sizeof(reg_list))) - return -EFAULT; - n = reg_list.n; - reg_list.n = kvm_arm_num_regs(vcpu); - if (copy_to_user(user_list, ®_list, sizeof(reg_list))) - return -EFAULT; - if (n < reg_list.n) - return -E2BIG; - return kvm_arm_copy_reg_indices(vcpu, user_list->reg); - } - default: - return -EINVAL; - } -} - -int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) -{ - return -EINVAL; -} - -long kvm_arch_vm_ioctl(struct file *filp, - unsigned int ioctl, unsigned long arg) -{ - return -EINVAL; -} - -static void cpu_init_hyp_mode(void *vector) -{ - unsigned long long pgd_ptr; - unsigned long pgd_low, pgd_high; - unsigned long hyp_stack_ptr; - unsigned long stack_page; - unsigned long vector_ptr; - - /* Switch from the HYP stub to our own HYP init vector */ - __hyp_set_vectors((unsigned long)vector); - - pgd_ptr = (unsigned long long)kvm_mmu_get_httbr(); - pgd_low = (pgd_ptr & ((1ULL << 32) - 1)); - pgd_high = (pgd_ptr >> 32ULL); - stack_page = __get_cpu_var(kvm_arm_hyp_stack_page); - hyp_stack_ptr = stack_page + PAGE_SIZE; - vector_ptr = (unsigned long)__kvm_hyp_vector; - - /* - * Call initialization code, and switch to the full blown - * HYP code. The init code doesn't need to preserve these registers as - * r1-r3 and r12 are already callee save according to the AAPCS. - * Note that we slightly misuse the prototype by casing the pgd_low to - * a void *. - */ - kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr); -} - -/** - * Inits Hyp-mode on all online CPUs - */ -static int init_hyp_mode(void) -{ - phys_addr_t init_phys_addr; - int cpu; - int err = 0; - - /* - * Allocate Hyp PGD and setup Hyp identity mapping - */ - err = kvm_mmu_init(); - if (err) - goto out_err; - - /* - * It is probably enough to obtain the default on one - * CPU. It's unlikely to be different on the others. 
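kvm_vm_ioctl_irq_line() above splits the 32-bit irq value into an interrupt type, a vcpu index and an IRQ number using the KVM_ARM_IRQ_* shifts and masks from the removed uapi header. User space performs the inverse packing before issuing the KVM_IRQ_LINE ioctl; a small sketch using the same constants (the helper name is illustrative).

#include <stdint.h>

/* Constants copied from the removed arch/arm/include/uapi/asm/kvm.h above. */
#define KVM_ARM_IRQ_TYPE_SHIFT	24
#define KVM_ARM_IRQ_TYPE_MASK	0xff
#define KVM_ARM_IRQ_VCPU_SHIFT	16
#define KVM_ARM_IRQ_VCPU_MASK	0xff
#define KVM_ARM_IRQ_NUM_SHIFT	0
#define KVM_ARM_IRQ_NUM_MASK	0xffff

#define KVM_ARM_IRQ_TYPE_CPU	0
#define KVM_ARM_IRQ_CPU_IRQ	0
#define KVM_ARM_IRQ_CPU_FIQ	1

/* Pack type/vcpu/num into the kvm_irq_level.irq encoding decoded above. */
static uint32_t pack_arm_irq(uint32_t type, uint32_t vcpu_idx, uint32_t num)
{
	return ((type & KVM_ARM_IRQ_TYPE_MASK) << KVM_ARM_IRQ_TYPE_SHIFT) |
	       ((vcpu_idx & KVM_ARM_IRQ_VCPU_MASK) << KVM_ARM_IRQ_VCPU_SHIFT) |
	       ((num & KVM_ARM_IRQ_NUM_MASK) << KVM_ARM_IRQ_NUM_SHIFT);
}

Raising the CPU-level IRQ line of vcpu 0, for example, would set irq = pack_arm_irq(KVM_ARM_IRQ_TYPE_CPU, 0, KVM_ARM_IRQ_CPU_IRQ) and level = 1 in struct kvm_irq_level before calling ioctl(vm_fd, KVM_IRQ_LINE, &irq_level).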
- */ - hyp_default_vectors = __hyp_get_vectors(); - - /* - * Allocate stack pages for Hypervisor-mode - */ - for_each_possible_cpu(cpu) { - unsigned long stack_page; - - stack_page = __get_free_page(GFP_KERNEL); - if (!stack_page) { - err = -ENOMEM; - goto out_free_stack_pages; - } - - per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page; - } - - /* - * Execute the init code on each CPU. - * - * Note: The stack is not mapped yet, so don't do anything else than - * initializing the hypervisor mode on each CPU using a local stack - * space for temporary storage. - */ - init_phys_addr = virt_to_phys(__kvm_hyp_init); - for_each_online_cpu(cpu) { - smp_call_function_single(cpu, cpu_init_hyp_mode, - (void *)(long)init_phys_addr, 1); - } - - /* - * Unmap the identity mapping - */ - kvm_clear_hyp_idmap(); - - /* - * Map the Hyp-code called directly from the host - */ - err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end); - if (err) { - kvm_err("Cannot map world-switch code\n"); - goto out_free_mappings; - } - - /* - * Map the Hyp stack pages - */ - for_each_possible_cpu(cpu) { - char *stack_page = (char *)per_cpu(kvm_arm_hyp_stack_page, cpu); - err = create_hyp_mappings(stack_page, stack_page + PAGE_SIZE); - - if (err) { - kvm_err("Cannot map hyp stack\n"); - goto out_free_mappings; - } - } - - /* - * Map the host VFP structures - */ - kvm_host_vfp_state = alloc_percpu(struct vfp_hard_struct); - if (!kvm_host_vfp_state) { - err = -ENOMEM; - kvm_err("Cannot allocate host VFP state\n"); - goto out_free_mappings; - } - - for_each_possible_cpu(cpu) { - struct vfp_hard_struct *vfp; - - vfp = per_cpu_ptr(kvm_host_vfp_state, cpu); - err = create_hyp_mappings(vfp, vfp + 1); - - if (err) { - kvm_err("Cannot map host VFP state: %d\n", err); - goto out_free_vfp; - } - } - - kvm_info("Hyp mode initialized successfully\n"); - return 0; -out_free_vfp: - free_percpu(kvm_host_vfp_state); -out_free_mappings: - free_hyp_pmds(); -out_free_stack_pages: - for_each_possible_cpu(cpu) - free_page(per_cpu(kvm_arm_hyp_stack_page, cpu)); -out_err: - kvm_err("error initializing Hyp mode: %d\n", err); - return err; -} - -/** - * Initialize Hyp-mode and memory mappings on all CPUs. - */ -int kvm_arch_init(void *opaque) -{ - int err; - - if (!is_hyp_mode_available()) { - kvm_err("HYP mode not available\n"); - return -ENODEV; - } - - if (kvm_target_cpu() < 0) { - kvm_err("Target CPU not supported!\n"); - return -ENODEV; - } - - err = init_hyp_mode(); - if (err) - goto out_err; - - kvm_coproc_table_init(); - return 0; -out_err: - return err; -} - -/* NOP: Compiling as a module not supported */ -void kvm_arch_exit(void) -{ -} - -static int arm_init(void) -{ - int rc = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); - return rc; -} - -module_init(arm_init); diff --git a/trunk/arch/arm/kvm/coproc.c b/trunk/arch/arm/kvm/coproc.c deleted file mode 100644 index d782638c7ec0..000000000000 --- a/trunk/arch/arm/kvm/coproc.c +++ /dev/null @@ -1,1046 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Rusty Russell - * Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "../vfp/vfpinstr.h" - -#include "trace.h" -#include "coproc.h" - - -/****************************************************************************** - * Co-processor emulation - *****************************************************************************/ - -/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ -static u32 cache_levels; - -/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ -#define CSSELR_MAX 12 - -int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_inject_undefined(vcpu); - return 1; -} - -int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - /* - * We can get here, if the host has been built without VFPv3 support, - * but the guest attempted a floating point operation. - */ - kvm_inject_undefined(vcpu); - return 1; -} - -int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_inject_undefined(vcpu); - return 1; -} - -int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - kvm_inject_undefined(vcpu); - return 1; -} - -/* See note at ARM ARM B1.14.4 */ -static bool access_dcsw(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - u32 val; - int cpu; - - cpu = get_cpu(); - - if (!p->is_write) - return read_from_write_only(vcpu, p); - - cpumask_setall(&vcpu->arch.require_dcache_flush); - cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); - - /* If we were already preempted, take the long way around */ - if (cpu != vcpu->arch.last_pcpu) { - flush_cache_all(); - goto done; - } - - val = *vcpu_reg(vcpu, p->Rt1); - - switch (p->CRm) { - case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ - case 14: /* DCCISW */ - asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); - break; - - case 10: /* DCCSW */ - asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); - break; - } - -done: - put_cpu(); - - return true; -} - -/* - * We could trap ID_DFR0 and tell the guest we don't support performance - * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was - * NAKed, so it will read the PMCR anyway. - * - * Therefore we tell the guest we have 0 counters. Unfortunately, we - * must always support PMCCNTR (the cycle counter): we just RAZ/WI for - * all PM registers, which doesn't crash the guest kernel at least. - */ -static bool pm_fake(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - else - return read_zero(vcpu, p); -} - -#define access_pmcr pm_fake -#define access_pmcntenset pm_fake -#define access_pmcntenclr pm_fake -#define access_pmovsr pm_fake -#define access_pmselr pm_fake -#define access_pmceid0 pm_fake -#define access_pmceid1 pm_fake -#define access_pmccntr pm_fake -#define access_pmxevtyper pm_fake -#define access_pmxevcntr pm_fake -#define access_pmuserenr pm_fake -#define access_pmintenset pm_fake -#define access_pmintenclr pm_fake - -/* Architected CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 - */ -static const struct coproc_reg cp15_regs[] = { - /* CSSELR: swapped by interrupt.S. 
*/ - { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, - NULL, reset_unknown, c0_CSSELR }, - - /* TTBR0/TTBR1: swapped by interrupt.S. */ - { CRm( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, - { CRm( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, - - /* TTBCR: swapped by interrupt.S. */ - { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_val, c2_TTBCR, 0x00000000 }, - - /* DACR: swapped by interrupt.S. */ - { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c3_DACR }, - - /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. */ - { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c5_DFSR }, - { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c5_IFSR }, - { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c5_ADFSR }, - { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c5_AIFSR }, - - /* DFAR/IFAR: swapped by interrupt.S. */ - { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c6_DFAR }, - { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_unknown, c6_IFAR }, - /* - * DC{C,I,CI}SW operations: - */ - { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, - { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, - { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, - /* - * Dummy performance monitor implementation. - */ - { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, - { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, - { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, - { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, - { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, - { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, - { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, - { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, - { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, - { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, - - /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. */ - { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, - NULL, reset_unknown, c10_PRRR}, - { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, - NULL, reset_unknown, c10_NMRR}, - - /* VBAR: swapped by interrupt.S. */ - { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_val, c12_VBAR, 0x00000000 }, - - /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */ - { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, - NULL, reset_val, c13_CID, 0x00000000 }, - { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_unknown, c13_TID_URW }, - { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, - NULL, reset_unknown, c13_TID_URO }, - { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, - NULL, reset_unknown, c13_TID_PRIV }, -}; - -/* Target specific emulation tables */ -static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; - -void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) -{ - target_tables[table->target] = table; -} - -/* Get specific register table for this target. 
*/ -static const struct coproc_reg *get_target_table(unsigned target, size_t *num) -{ - struct kvm_coproc_target_table *table; - - table = target_tables[target]; - *num = table->num; - return table->table; -} - -static const struct coproc_reg *find_reg(const struct coproc_params *params, - const struct coproc_reg table[], - unsigned int num) -{ - unsigned int i; - - for (i = 0; i < num; i++) { - const struct coproc_reg *r = &table[i]; - - if (params->is_64bit != r->is_64) - continue; - if (params->CRn != r->CRn) - continue; - if (params->CRm != r->CRm) - continue; - if (params->Op1 != r->Op1) - continue; - if (params->Op2 != r->Op2) - continue; - - return r; - } - return NULL; -} - -static int emulate_cp15(struct kvm_vcpu *vcpu, - const struct coproc_params *params) -{ - size_t num; - const struct coproc_reg *table, *r; - - trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn, - params->CRm, params->Op2, params->is_write); - - table = get_target_table(vcpu->arch.target, &num); - - /* Search target-specific then generic table. */ - r = find_reg(params, table, num); - if (!r) - r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); - - if (likely(r)) { - /* If we don't have an accessor, we should never get here! */ - BUG_ON(!r->access); - - if (likely(r->access(vcpu, params, r))) { - /* Skip instruction, since it was emulated */ - kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); - return 1; - } - /* If access function fails, it should complain. */ - } else { - kvm_err("Unsupported guest CP15 access at: %08x\n", - *vcpu_pc(vcpu)); - print_cp_instr(params); - } - kvm_inject_undefined(vcpu); - return 1; -} - -/** - * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access - * @vcpu: The VCPU pointer - * @run: The kvm_run struct - */ -int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - struct coproc_params params; - - params.CRm = (vcpu->arch.hsr >> 1) & 0xf; - params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; - params.is_write = ((vcpu->arch.hsr & 1) == 0); - params.is_64bit = true; - - params.Op1 = (vcpu->arch.hsr >> 16) & 0xf; - params.Op2 = 0; - params.Rt2 = (vcpu->arch.hsr >> 10) & 0xf; - params.CRn = 0; - - return emulate_cp15(vcpu, ¶ms); -} - -static void reset_coproc_regs(struct kvm_vcpu *vcpu, - const struct coproc_reg *table, size_t num) -{ - unsigned long i; - - for (i = 0; i < num; i++) - if (table[i].reset) - table[i].reset(vcpu, &table[i]); -} - -/** - * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access - * @vcpu: The VCPU pointer - * @run: The kvm_run struct - */ -int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - struct coproc_params params; - - params.CRm = (vcpu->arch.hsr >> 1) & 0xf; - params.Rt1 = (vcpu->arch.hsr >> 5) & 0xf; - params.is_write = ((vcpu->arch.hsr & 1) == 0); - params.is_64bit = false; - - params.CRn = (vcpu->arch.hsr >> 10) & 0xf; - params.Op1 = (vcpu->arch.hsr >> 14) & 0x7; - params.Op2 = (vcpu->arch.hsr >> 17) & 0x7; - params.Rt2 = 0; - - return emulate_cp15(vcpu, ¶ms); -} - -/****************************************************************************** - * Userspace API - *****************************************************************************/ - -static bool index_to_params(u64 id, struct coproc_params *params) -{ - switch (id & KVM_REG_SIZE_MASK) { - case KVM_REG_SIZE_U32: - /* Any unused index bits means it's not valid. 
*/ - if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK - | KVM_REG_ARM_COPROC_MASK - | KVM_REG_ARM_32_CRN_MASK - | KVM_REG_ARM_CRM_MASK - | KVM_REG_ARM_OPC1_MASK - | KVM_REG_ARM_32_OPC2_MASK)) - return false; - - params->is_64bit = false; - params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK) - >> KVM_REG_ARM_32_CRN_SHIFT); - params->CRm = ((id & KVM_REG_ARM_CRM_MASK) - >> KVM_REG_ARM_CRM_SHIFT); - params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) - >> KVM_REG_ARM_OPC1_SHIFT); - params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK) - >> KVM_REG_ARM_32_OPC2_SHIFT); - return true; - case KVM_REG_SIZE_U64: - /* Any unused index bits means it's not valid. */ - if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK - | KVM_REG_ARM_COPROC_MASK - | KVM_REG_ARM_CRM_MASK - | KVM_REG_ARM_OPC1_MASK)) - return false; - params->is_64bit = true; - params->CRm = ((id & KVM_REG_ARM_CRM_MASK) - >> KVM_REG_ARM_CRM_SHIFT); - params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) - >> KVM_REG_ARM_OPC1_SHIFT); - params->Op2 = 0; - params->CRn = 0; - return true; - default: - return false; - } -} - -/* Decode an index value, and find the cp15 coproc_reg entry. */ -static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu, - u64 id) -{ - size_t num; - const struct coproc_reg *table, *r; - struct coproc_params params; - - /* We only do cp15 for now. */ - if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15) - return NULL; - - if (!index_to_params(id, ¶ms)) - return NULL; - - table = get_target_table(vcpu->arch.target, &num); - r = find_reg(¶ms, table, num); - if (!r) - r = find_reg(¶ms, cp15_regs, ARRAY_SIZE(cp15_regs)); - - /* Not saved in the cp15 array? */ - if (r && !r->reg) - r = NULL; - - return r; -} - -/* - * These are the invariant cp15 registers: we let the guest see the host - * versions of these, so they're part of the guest state. - * - * A future CPU may provide a mechanism to present different values to - * the guest, or a future kvm may trap them. - */ -/* Unfortunately, there's no register-argument for mrc, so generate. 
*/ -#define FUNCTION_FOR32(crn, crm, op1, op2, name) \ - static void get_##name(struct kvm_vcpu *v, \ - const struct coproc_reg *r) \ - { \ - u32 val; \ - \ - asm volatile("mrc p15, " __stringify(op1) \ - ", %0, c" __stringify(crn) \ - ", c" __stringify(crm) \ - ", " __stringify(op2) "\n" : "=r" (val)); \ - ((struct coproc_reg *)r)->val = val; \ - } - -FUNCTION_FOR32(0, 0, 0, 0, MIDR) -FUNCTION_FOR32(0, 0, 0, 1, CTR) -FUNCTION_FOR32(0, 0, 0, 2, TCMTR) -FUNCTION_FOR32(0, 0, 0, 3, TLBTR) -FUNCTION_FOR32(0, 0, 0, 6, REVIDR) -FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) -FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) -FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) -FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) -FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) -FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) -FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) -FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) -FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) -FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) -FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) -FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) -FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) -FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) -FUNCTION_FOR32(0, 0, 1, 1, CLIDR) -FUNCTION_FOR32(0, 0, 1, 7, AIDR) - -/* ->val is filled in by kvm_invariant_coproc_table_init() */ -static struct coproc_reg invariant_cp15[] = { - { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, - { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, - - { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, - { CRn( 0), CRm( 1), Op1( 0), Op2( 7), is32, NULL, get_ID_MMFR3 }, - - { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, - { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, - - { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, - { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, -}; - -static int reg_from_user(void *val, const void __user *uaddr, u64 id) -{ - /* This Just Works because we are little endian. */ - if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) - return -EFAULT; - return 0; -} - -static int reg_to_user(void __user *uaddr, const void *val, u64 id) -{ - /* This Just Works because we are little endian. 
*/ - if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) - return -EFAULT; - return 0; -} - -static int get_invariant_cp15(u64 id, void __user *uaddr) -{ - struct coproc_params params; - const struct coproc_reg *r; - - if (!index_to_params(id, ¶ms)) - return -ENOENT; - - r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); - if (!r) - return -ENOENT; - - return reg_to_user(uaddr, &r->val, id); -} - -static int set_invariant_cp15(u64 id, void __user *uaddr) -{ - struct coproc_params params; - const struct coproc_reg *r; - int err; - u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ - - if (!index_to_params(id, ¶ms)) - return -ENOENT; - r = find_reg(¶ms, invariant_cp15, ARRAY_SIZE(invariant_cp15)); - if (!r) - return -ENOENT; - - err = reg_from_user(&val, uaddr, id); - if (err) - return err; - - /* This is what we mean by invariant: you can't change it. */ - if (r->val != val) - return -EINVAL; - - return 0; -} - -static bool is_valid_cache(u32 val) -{ - u32 level, ctype; - - if (val >= CSSELR_MAX) - return -ENOENT; - - /* Bottom bit is Instruction or Data bit. Next 3 bits are level. */ - level = (val >> 1); - ctype = (cache_levels >> (level * 3)) & 7; - - switch (ctype) { - case 0: /* No cache */ - return false; - case 1: /* Instruction cache only */ - return (val & 1); - case 2: /* Data cache only */ - case 4: /* Unified cache */ - return !(val & 1); - case 3: /* Separate instruction and data caches */ - return true; - default: /* Reserved: we can't know instruction or data. */ - return false; - } -} - -/* Which cache CCSIDR represents depends on CSSELR value. */ -static u32 get_ccsidr(u32 csselr) -{ - u32 ccsidr; - - /* Make sure noone else changes CSSELR during this! */ - local_irq_disable(); - /* Put value into CSSELR */ - asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr)); - isb(); - /* Read result out of CCSIDR */ - asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr)); - local_irq_enable(); - - return ccsidr; -} - -static int demux_c15_get(u64 id, void __user *uaddr) -{ - u32 val; - u32 __user *uval = uaddr; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { - case KVM_REG_ARM_DEMUX_ID_CCSIDR: - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) - >> KVM_REG_ARM_DEMUX_VAL_SHIFT; - if (!is_valid_cache(val)) - return -ENOENT; - - return put_user(get_ccsidr(val), uval); - default: - return -ENOENT; - } -} - -static int demux_c15_set(u64 id, void __user *uaddr) -{ - u32 val, newval; - u32 __user *uval = uaddr; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { - case KVM_REG_ARM_DEMUX_ID_CCSIDR: - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) - >> KVM_REG_ARM_DEMUX_VAL_SHIFT; - if (!is_valid_cache(val)) - return -ENOENT; - - if (get_user(newval, uval)) - return -EFAULT; - - /* This is also invariant: you can't change it. 
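A userspace sketch of the is_valid_cache() logic above: each cache level owns a 3-bit Ctype field in cache_levels, and the low bit of the CSSELR value selects instruction (1) versus data/unified (0). Note that the original appears to return -ENOENT from a bool function for out-of-range values, which a caller sees as true; the sketch returns false instead. EX_CSSELR_MAX and the sample cache_levels value are placeholders.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EX_CSSELR_MAX 12                 /* placeholder bound for the example */

/* Example CLIDR-style value: L1 separate I+D (3), L2 unified (4). */
static uint32_t cache_levels = (4u << 3) | 3u;

static bool ex_is_valid_cache(uint32_t val)
{
	uint32_t level, ctype;

	if (val >= EX_CSSELR_MAX)
		return false;

	level = val >> 1;                        /* bit 0 = I-or-D selector */
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: return false;                    /* no cache at this level */
	case 1: return (val & 1);                /* instruction cache only */
	case 2:                                  /* data cache only */
	case 4: return !(val & 1);               /* unified cache */
	case 3: return true;                     /* separate I and D caches */
	default: return false;                   /* reserved encodings */
	}
}

int main(void)
{
	for (uint32_t v = 0; v < EX_CSSELR_MAX; v++)
		printf("CSSELR=%u valid=%d\n", v, ex_is_valid_cache(v));
	return 0;
}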
*/ - if (newval != get_ccsidr(val)) - return -EINVAL; - return 0; - default: - return -ENOENT; - } -} - -#ifdef CONFIG_VFPv3 -static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC, - KVM_REG_ARM_VFP_FPSCR, - KVM_REG_ARM_VFP_FPINST, - KVM_REG_ARM_VFP_FPINST2, - KVM_REG_ARM_VFP_MVFR0, - KVM_REG_ARM_VFP_MVFR1, - KVM_REG_ARM_VFP_FPSID }; - -static unsigned int num_fp_regs(void) -{ - if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2) - return 32; - else - return 16; -} - -static unsigned int num_vfp_regs(void) -{ - /* Normal FP regs + control regs. */ - return num_fp_regs() + ARRAY_SIZE(vfp_sysregs); -} - -static int copy_vfp_regids(u64 __user *uindices) -{ - unsigned int i; - const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP; - const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP; - - for (i = 0; i < num_fp_regs(); i++) { - if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i, - uindices)) - return -EFAULT; - uindices++; - } - - for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) { - if (put_user(u32reg | vfp_sysregs[i], uindices)) - return -EFAULT; - uindices++; - } - - return num_vfp_regs(); -} - -static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) -{ - u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); - u32 val; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - if (vfpid < num_fp_regs()) { - if (KVM_REG_SIZE(id) != 8) - return -ENOENT; - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], - id); - } - - /* FP control registers are all 32 bit. */ - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - - switch (vfpid) { - case KVM_REG_ARM_VFP_FPEXC: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); - case KVM_REG_ARM_VFP_FPSCR: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); - case KVM_REG_ARM_VFP_FPINST: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); - case KVM_REG_ARM_VFP_FPINST2: - return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); - case KVM_REG_ARM_VFP_MVFR0: - val = fmrx(MVFR0); - return reg_to_user(uaddr, &val, id); - case KVM_REG_ARM_VFP_MVFR1: - val = fmrx(MVFR1); - return reg_to_user(uaddr, &val, id); - case KVM_REG_ARM_VFP_FPSID: - val = fmrx(FPSID); - return reg_to_user(uaddr, &val, id); - default: - return -ENOENT; - } -} - -static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) -{ - u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); - u32 val; - - /* Fail if we have unknown bits set. */ - if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK - | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) - return -ENOENT; - - if (vfpid < num_fp_regs()) { - if (KVM_REG_SIZE(id) != 8) - return -ENOENT; - return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], - uaddr, id); - } - - /* FP control registers are all 32 bit. */ - if (KVM_REG_SIZE(id) != 4) - return -ENOENT; - - switch (vfpid) { - case KVM_REG_ARM_VFP_FPEXC: - return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); - case KVM_REG_ARM_VFP_FPSCR: - return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); - case KVM_REG_ARM_VFP_FPINST: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); - case KVM_REG_ARM_VFP_FPINST2: - return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); - /* These are invariant. 
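A sketch of the copy_vfp_regids() pattern above: each double-width FP register is exported as one 64-bit index, each control register as one 32-bit index, and the function returns how many ids it produced. All EX_* constants are placeholders, not the real KVM_REG_* values.

#include <stdint.h>
#include <stdio.h>

#define EX_REG_ARM       (0x40ULL << 56)
#define EX_REG_SIZE_U32  (2ULL << 52)
#define EX_REG_SIZE_U64  (3ULL << 52)
#define EX_REG_VFP       (0x12ULL << 16)
#define EX_VFP_BASE_REG  0x0
#define EX_VFP_FPEXC     0x100           /* placeholder control-register ids */
#define EX_VFP_FPSCR     0x101

static int ex_copy_vfp_regids(uint64_t *uindices, unsigned num_fp_regs)
{
	const uint64_t u32reg = EX_REG_ARM | EX_REG_SIZE_U32 | EX_REG_VFP;
	const uint64_t u64reg = EX_REG_ARM | EX_REG_SIZE_U64 | EX_REG_VFP;
	const uint64_t sysregs[] = { EX_VFP_FPEXC, EX_VFP_FPSCR };
	unsigned i, n = 0;

	for (i = 0; i < num_fp_regs; i++)
		uindices[n++] = (u64reg | EX_VFP_BASE_REG) + i;   /* d0..dN */
	for (i = 0; i < sizeof(sysregs) / sizeof(sysregs[0]); i++)
		uindices[n++] = u32reg | sysregs[i];              /* control regs */

	return n;
}

int main(void)
{
	uint64_t ids[16 + 2];
	int n = ex_copy_vfp_regids(ids, 16);

	printf("exported %d VFP register ids, first=0x%llx\n",
	       n, (unsigned long long)ids[0]);
	return 0;
}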
*/ - case KVM_REG_ARM_VFP_MVFR0: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(MVFR0)) - return -EINVAL; - return 0; - case KVM_REG_ARM_VFP_MVFR1: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(MVFR1)) - return -EINVAL; - return 0; - case KVM_REG_ARM_VFP_FPSID: - if (reg_from_user(&val, uaddr, id)) - return -EFAULT; - if (val != fmrx(FPSID)) - return -EINVAL; - return 0; - default: - return -ENOENT; - } -} -#else /* !CONFIG_VFPv3 */ -static unsigned int num_vfp_regs(void) -{ - return 0; -} - -static int copy_vfp_regids(u64 __user *uindices) -{ - return 0; -} - -static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) -{ - return -ENOENT; -} - -static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) -{ - return -ENOENT; -} -#endif /* !CONFIG_VFPv3 */ - -int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - const struct coproc_reg *r; - void __user *uaddr = (void __user *)(long)reg->addr; - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) - return demux_c15_get(reg->id, uaddr); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) - return vfp_get_reg(vcpu, reg->id, uaddr); - - r = index_to_coproc_reg(vcpu, reg->id); - if (!r) - return get_invariant_cp15(reg->id, uaddr); - - /* Note: copies two regs if size is 64 bit. */ - return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); -} - -int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - const struct coproc_reg *r; - void __user *uaddr = (void __user *)(long)reg->addr; - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) - return demux_c15_set(reg->id, uaddr); - - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) - return vfp_set_reg(vcpu, reg->id, uaddr); - - r = index_to_coproc_reg(vcpu, reg->id); - if (!r) - return set_invariant_cp15(reg->id, uaddr); - - /* Note: copies two regs if size is 64 bit */ - return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); -} - -static unsigned int num_demux_regs(void) -{ - unsigned int i, count = 0; - - for (i = 0; i < CSSELR_MAX; i++) - if (is_valid_cache(i)) - count++; - - return count; -} - -static int write_demux_regids(u64 __user *uindices) -{ - u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; - unsigned int i; - - val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; - for (i = 0; i < CSSELR_MAX; i++) { - if (!is_valid_cache(i)) - continue; - if (put_user(val | i, uindices)) - return -EFAULT; - uindices++; - } - return 0; -} - -static u64 cp15_to_index(const struct coproc_reg *reg) -{ - u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); - if (reg->is_64) { - val |= KVM_REG_SIZE_U64; - val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); - } else { - val |= KVM_REG_SIZE_U32; - val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); - val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); - val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); - val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); - } - return val; -} - -static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) -{ - if (!*uind) - return true; - - if (put_user(cp15_to_index(reg), *uind)) - return false; - - (*uind)++; - return true; -} - -/* Assumed ordered tables, see kvm_coproc_table_init. 
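A small sketch of the "NULL destination means count only" trick used by copy_reg_to_user() above: the same walker is run once with a NULL buffer to size the index list and once with a real buffer to fill it. Names and values here are illustrative only.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool emit_index(uint64_t idx, uint64_t **out)
{
	if (!*out)
		return true;       /* counting pass: nothing to write */
	**out = idx;               /* copying pass: store and advance */
	(*out)++;
	return true;
}

static int walk(uint64_t *out)
{
	int total = 0;

	for (uint64_t idx = 100; idx < 105; idx++) {
		if (!emit_index(idx, &out))
			return -1;
		total++;
	}
	return total;
}

int main(void)
{
	uint64_t buf[16];
	int n = walk(NULL);        /* first pass: how many indices? */

	printf("need %d slots\n", n);
	walk(buf);                 /* second pass: fill the buffer */
	printf("first index = %llu\n", (unsigned long long)buf[0]);
	return 0;
}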
*/ -static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) -{ - const struct coproc_reg *i1, *i2, *end1, *end2; - unsigned int total = 0; - size_t num; - - /* We check for duplicates here, to allow arch-specific overrides. */ - i1 = get_target_table(vcpu->arch.target, &num); - end1 = i1 + num; - i2 = cp15_regs; - end2 = cp15_regs + ARRAY_SIZE(cp15_regs); - - BUG_ON(i1 == end1 || i2 == end2); - - /* Walk carefully, as both tables may refer to the same register. */ - while (i1 || i2) { - int cmp = cmp_reg(i1, i2); - /* target-specific overrides generic entry. */ - if (cmp <= 0) { - /* Ignore registers we trap but don't save. */ - if (i1->reg) { - if (!copy_reg_to_user(i1, &uind)) - return -EFAULT; - total++; - } - } else { - /* Ignore registers we trap but don't save. */ - if (i2->reg) { - if (!copy_reg_to_user(i2, &uind)) - return -EFAULT; - total++; - } - } - - if (cmp <= 0 && ++i1 == end1) - i1 = NULL; - if (cmp >= 0 && ++i2 == end2) - i2 = NULL; - } - return total; -} - -unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) -{ - return ARRAY_SIZE(invariant_cp15) - + num_demux_regs() - + num_vfp_regs() - + walk_cp15(vcpu, (u64 __user *)NULL); -} - -int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - unsigned int i; - int err; - - /* Then give them all the invariant registers' indices. */ - for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { - if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) - return -EFAULT; - uindices++; - } - - err = walk_cp15(vcpu, uindices); - if (err < 0) - return err; - uindices += err; - - err = copy_vfp_regids(uindices); - if (err < 0) - return err; - uindices += err; - - return write_demux_regids(uindices); -} - -void kvm_coproc_table_init(void) -{ - unsigned int i; - - /* Make sure tables are unique and in order. */ - for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) - BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); - - /* We abuse the reset function to overwrite the table itself. */ - for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) - invariant_cp15[i].reset(NULL, &invariant_cp15[i]); - - /* - * CLIDR format is awkward, so clean it up. See ARM B4.1.20: - * - * If software reads the Cache Type fields from Ctype1 - * upwards, once it has seen a value of 0b000, no caches - * exist at further-out levels of the hierarchy. So, for - * example, if Ctype3 is the first Cache Type field with a - * value of 0b000, the values of Ctype4 to Ctype7 must be - * ignored. - */ - asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); - for (i = 0; i < 7; i++) - if (((cache_levels >> (i*3)) & 7) == 0) - break; - /* Clear all higher bits. */ - cache_levels &= (1 << (i*3))-1; -} - -/** - * kvm_reset_coprocs - sets cp15 registers to reset value - * @vcpu: The VCPU pointer - * - * This function finds the right table above and sets the registers on the - * virtual CPU struct to their architecturally defined reset values. - */ -void kvm_reset_coprocs(struct kvm_vcpu *vcpu) -{ - size_t num; - const struct coproc_reg *table; - - /* Catch someone adding a register without putting in reset entry. */ - memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); - - /* Generic chip reset first (so target could override). 
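A userspace sketch of the walk_cp15() two-table merge above: both tables are sorted, and on a tie the target-specific entry wins while the generic one is skipped, which is exactly what the "cmp <= 0" / "cmp >= 0" advancing achieves. The register numbers below are made up.

#include <stdio.h>

static void merge_walk(const int *a, int na, const int *b, int nb)
{
	int i = 0, j = 0;

	while (i < na || j < nb) {
		int cmp;

		if (i >= na)
			cmp = 1;            /* only generic entries left */
		else if (j >= nb)
			cmp = -1;           /* only target entries left */
		else
			cmp = a[i] - b[j];

		if (cmp <= 0)
			printf("target entry %d\n", a[i]);
		else
			printf("generic entry %d\n", b[j]);

		if (cmp <= 0)
			i++;
		if (cmp >= 0)
			j++;                /* on a tie both advance: override */
	}
}

int main(void)
{
	const int target[]  = { 2, 5, 9 };      /* target-specific registers */
	const int generic[] = { 1, 2, 5, 7 };   /* generic cp15 registers */

	merge_walk(target, 3, generic, 4);
	return 0;
}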
*/ - reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); - - table = get_target_table(vcpu->arch.target, &num); - reset_coproc_regs(vcpu, table, num); - - for (num = 1; num < NR_CP15_REGS; num++) - if (vcpu->arch.cp15[num] == 0x42424242) - panic("Didn't reset vcpu->arch.cp15[%zi]", num); -} diff --git a/trunk/arch/arm/kvm/coproc.h b/trunk/arch/arm/kvm/coproc.h deleted file mode 100644 index 992adfafa2ff..000000000000 --- a/trunk/arch/arm/kvm/coproc.h +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#ifndef __ARM_KVM_COPROC_LOCAL_H__ -#define __ARM_KVM_COPROC_LOCAL_H__ - -struct coproc_params { - unsigned long CRn; - unsigned long CRm; - unsigned long Op1; - unsigned long Op2; - unsigned long Rt1; - unsigned long Rt2; - bool is_64bit; - bool is_write; -}; - -struct coproc_reg { - /* MRC/MCR/MRRC/MCRR instruction which accesses it. */ - unsigned long CRn; - unsigned long CRm; - unsigned long Op1; - unsigned long Op2; - - bool is_64; - - /* Trapped access from guest, if non-NULL. */ - bool (*access)(struct kvm_vcpu *, - const struct coproc_params *, - const struct coproc_reg *); - - /* Initialization for vcpu. */ - void (*reset)(struct kvm_vcpu *, const struct coproc_reg *); - - /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */ - unsigned long reg; - - /* Value (usually reset value) */ - u64 val; -}; - -static inline void print_cp_instr(const struct coproc_params *p) -{ - /* Look, we even formatted it for you to paste into the table! */ - if (p->is_64bit) { - kvm_pr_unimpl(" { CRm(%2lu), Op1(%2lu), is64, func_%s },\n", - p->CRm, p->Op1, p->is_write ? "write" : "read"); - } else { - kvm_pr_unimpl(" { CRn(%2lu), CRm(%2lu), Op1(%2lu), Op2(%2lu), is32," - " func_%s },\n", - p->CRn, p->CRm, p->Op1, p->Op2, - p->is_write ? 
"write" : "read"); - } -} - -static inline bool ignore_write(struct kvm_vcpu *vcpu, - const struct coproc_params *p) -{ - return true; -} - -static inline bool read_zero(struct kvm_vcpu *vcpu, - const struct coproc_params *p) -{ - *vcpu_reg(vcpu, p->Rt1) = 0; - return true; -} - -static inline bool write_to_read_only(struct kvm_vcpu *vcpu, - const struct coproc_params *params) -{ - kvm_debug("CP15 write to read-only register at: %08x\n", - *vcpu_pc(vcpu)); - print_cp_instr(params); - return false; -} - -static inline bool read_from_write_only(struct kvm_vcpu *vcpu, - const struct coproc_params *params) -{ - kvm_debug("CP15 read to write-only register at: %08x\n", - *vcpu_pc(vcpu)); - print_cp_instr(params); - return false; -} - -/* Reset functions */ -static inline void reset_unknown(struct kvm_vcpu *vcpu, - const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = 0xdecafbad; -} - -static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15)); - vcpu->arch.cp15[r->reg] = r->val; -} - -static inline void reset_unknown64(struct kvm_vcpu *vcpu, - const struct coproc_reg *r) -{ - BUG_ON(!r->reg); - BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15)); - - vcpu->arch.cp15[r->reg] = 0xdecafbad; - vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee; -} - -static inline int cmp_reg(const struct coproc_reg *i1, - const struct coproc_reg *i2) -{ - BUG_ON(i1 == i2); - if (!i1) - return 1; - else if (!i2) - return -1; - if (i1->CRn != i2->CRn) - return i1->CRn - i2->CRn; - if (i1->CRm != i2->CRm) - return i1->CRm - i2->CRm; - if (i1->Op1 != i2->Op1) - return i1->Op1 - i2->Op1; - return i1->Op2 - i2->Op2; -} - - -#define CRn(_x) .CRn = _x -#define CRm(_x) .CRm = _x -#define Op1(_x) .Op1 = _x -#define Op2(_x) .Op2 = _x -#define is64 .is_64 = true -#define is32 .is_64 = false - -#endif /* __ARM_KVM_COPROC_LOCAL_H__ */ diff --git a/trunk/arch/arm/kvm/coproc_a15.c b/trunk/arch/arm/kvm/coproc_a15.c deleted file mode 100644 index 685063a6d0cf..000000000000 --- a/trunk/arch/arm/kvm/coproc_a15.c +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Authors: Rusty Russell - * Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ -#include -#include -#include -#include -#include -#include -#include - -static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - /* - * Compute guest MPIDR: - * (Even if we present only one VCPU to the guest on an SMP - * host we don't set the U bit in the MPIDR, or vice versa, as - * revealing the underlying hardware properties is likely to - * be the best choice). 
- */ - vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK) - | (vcpu->vcpu_id & MPIDR_LEVEL_MASK); -} - -#include "coproc.h" - -/* A15 TRM 4.3.28: RO WI */ -static bool access_actlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; - return true; -} - -/* A15 TRM 4.3.60: R/O. */ -static bool access_cbar(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return write_to_read_only(vcpu, p); - return read_zero(vcpu, p); -} - -/* A15 TRM 4.3.48: R/O WI. */ -static bool access_l2ctlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; - return true; -} - -static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - u32 l2ctlr, ncores; - - asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); - l2ctlr &= ~(3 << 24); - ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; - l2ctlr |= (ncores & 3) << 24; - - vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; -} - -static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) -{ - u32 actlr; - - /* ACTLR contains SMP bit: make sure you create all cpus first! */ - asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); - /* Make the SMP bit consistent with the guest configuration */ - if (atomic_read(&vcpu->kvm->online_vcpus) > 1) - actlr |= 1U << 6; - else - actlr &= ~(1U << 6); - - vcpu->arch.cp15[c1_ACTLR] = actlr; -} - -/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */ -static bool access_l2ectlr(struct kvm_vcpu *vcpu, - const struct coproc_params *p, - const struct coproc_reg *r) -{ - if (p->is_write) - return ignore_write(vcpu, p); - - *vcpu_reg(vcpu, p->Rt1) = 0; - return true; -} - -/* - * A15-specific CP15 registers. - * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 - */ -static const struct coproc_reg a15_regs[] = { - /* MPIDR: we use VMPIDR for guest access. */ - { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, - NULL, reset_mpidr, c0_MPIDR }, - - /* SCTLR: swapped by interrupt.S. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32, - NULL, reset_val, c1_SCTLR, 0x00C50078 }, - /* ACTLR: trapped by HCR.TAC bit. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, - access_actlr, reset_actlr, c1_ACTLR }, - /* CPACR: swapped by interrupt.S. */ - { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, - NULL, reset_val, c1_CPACR, 0x00000000 }, - - /* - * L2CTLR access (guest wants to know #CPUs). - */ - { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, - access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, - { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, - - /* The Configuration Base Address Register. 
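A sketch of the reset_l2ctlr() bit manipulation above: the number of online vcpus minus one is folded into the two-bit core-count field at bits [25:24] of the emulated L2CTLR. The host value used here is invented for the example.

#include <stdint.h>
#include <stdio.h>

static uint32_t fold_ncores(uint32_t l2ctlr, unsigned online_vcpus)
{
	uint32_t ncores = online_vcpus - 1;

	l2ctlr &= ~(3u << 24);           /* clear the core-count field */
	l2ctlr |= (ncores & 3) << 24;    /* insert (vcpus - 1), capped at 3 */
	return l2ctlr;
}

int main(void)
{
	uint32_t host_l2ctlr = 0x03000000;   /* pretend the host reports 4 cores */

	printf("guest L2CTLR for 2 vcpus: 0x%08x\n", fold_ncores(host_l2ctlr, 2));
	return 0;
}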
*/ - { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, -}; - -static struct kvm_coproc_target_table a15_target_table = { - .target = KVM_ARM_TARGET_CORTEX_A15, - .table = a15_regs, - .num = ARRAY_SIZE(a15_regs), -}; - -static int __init coproc_a15_init(void) -{ - unsigned int i; - - for (i = 1; i < ARRAY_SIZE(a15_regs); i++) - BUG_ON(cmp_reg(&a15_regs[i-1], - &a15_regs[i]) >= 0); - - kvm_register_target_coproc_table(&a15_target_table); - return 0; -} -late_initcall(coproc_a15_init); diff --git a/trunk/arch/arm/kvm/emulate.c b/trunk/arch/arm/kvm/emulate.c deleted file mode 100644 index d61450ac6665..000000000000 --- a/trunk/arch/arm/kvm/emulate.c +++ /dev/null @@ -1,373 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include - -#include "trace.h" - -#define VCPU_NR_MODES 6 -#define VCPU_REG_OFFSET_USR 0 -#define VCPU_REG_OFFSET_FIQ 1 -#define VCPU_REG_OFFSET_IRQ 2 -#define VCPU_REG_OFFSET_SVC 3 -#define VCPU_REG_OFFSET_ABT 4 -#define VCPU_REG_OFFSET_UND 5 -#define REG_OFFSET(_reg) \ - (offsetof(struct kvm_regs, _reg) / sizeof(u32)) - -#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num]) - -static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = { - /* USR/SYS Registers */ - [VCPU_REG_OFFSET_USR] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14), - }, - - /* FIQ Registers */ - [VCPU_REG_OFFSET_FIQ] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), - REG_OFFSET(fiq_regs[0]), /* r8 */ - REG_OFFSET(fiq_regs[1]), /* r9 */ - REG_OFFSET(fiq_regs[2]), /* r10 */ - REG_OFFSET(fiq_regs[3]), /* r11 */ - REG_OFFSET(fiq_regs[4]), /* r12 */ - REG_OFFSET(fiq_regs[5]), /* r13 */ - REG_OFFSET(fiq_regs[6]), /* r14 */ - }, - - /* IRQ Registers */ - [VCPU_REG_OFFSET_IRQ] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(irq_regs[0]), /* r13 */ - REG_OFFSET(irq_regs[1]), /* r14 */ - }, - - /* SVC Registers */ - [VCPU_REG_OFFSET_SVC] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(svc_regs[0]), /* r13 */ - 
REG_OFFSET(svc_regs[1]), /* r14 */ - }, - - /* ABT Registers */ - [VCPU_REG_OFFSET_ABT] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(abt_regs[0]), /* r13 */ - REG_OFFSET(abt_regs[1]), /* r14 */ - }, - - /* UND Registers */ - [VCPU_REG_OFFSET_UND] = { - USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2), - USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5), - USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8), - USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11), - USR_REG_OFFSET(12), - REG_OFFSET(und_regs[0]), /* r13 */ - REG_OFFSET(und_regs[1]), /* r14 */ - }, -}; - -/* - * Return a pointer to the register number valid in the current mode of - * the virtual CPU. - */ -u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num) -{ - u32 *reg_array = (u32 *)&vcpu->arch.regs; - u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; - - switch (mode) { - case USR_MODE...SVC_MODE: - mode &= ~MODE32_BIT; /* 0 ... 3 */ - break; - - case ABT_MODE: - mode = VCPU_REG_OFFSET_ABT; - break; - - case UND_MODE: - mode = VCPU_REG_OFFSET_UND; - break; - - case SYSTEM_MODE: - mode = VCPU_REG_OFFSET_USR; - break; - - default: - BUG(); - } - - return reg_array + vcpu_reg_offsets[mode][reg_num]; -} - -/* - * Return the SPSR for the current mode of the virtual CPU. - */ -u32 *vcpu_spsr(struct kvm_vcpu *vcpu) -{ - u32 mode = *vcpu_cpsr(vcpu) & MODE_MASK; - switch (mode) { - case SVC_MODE: - return &vcpu->arch.regs.KVM_ARM_SVC_spsr; - case ABT_MODE: - return &vcpu->arch.regs.KVM_ARM_ABT_spsr; - case UND_MODE: - return &vcpu->arch.regs.KVM_ARM_UND_spsr; - case IRQ_MODE: - return &vcpu->arch.regs.KVM_ARM_IRQ_spsr; - case FIQ_MODE: - return &vcpu->arch.regs.KVM_ARM_FIQ_spsr; - default: - BUG(); - } -} - -/** - * kvm_handle_wfi - handle a wait-for-interrupts instruction executed by a guest - * @vcpu: the vcpu pointer - * @run: the kvm_run structure pointer - * - * Simply sets the wait_for_interrupts flag on the vcpu structure, which will - * halt execution of world-switches and schedule other host processes until - * there is an incoming IRQ or FIQ to the VM. - */ -int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - trace_kvm_wfi(*vcpu_pc(vcpu)); - kvm_vcpu_block(vcpu); - return 1; -} - -/** - * adjust_itstate - adjust ITSTATE when emulating instructions in IT-block - * @vcpu: The VCPU pointer - * - * When exceptions occur while instructions are executed in Thumb IF-THEN - * blocks, the ITSTATE field of the CPSR is not advanved (updated), so we have - * to do this little bit of work manually. 
The fields map like this: - * - * IT[7:0] -> CPSR[26:25],CPSR[15:10] - */ -static void kvm_adjust_itstate(struct kvm_vcpu *vcpu) -{ - unsigned long itbits, cond; - unsigned long cpsr = *vcpu_cpsr(vcpu); - bool is_arm = !(cpsr & PSR_T_BIT); - - BUG_ON(is_arm && (cpsr & PSR_IT_MASK)); - - if (!(cpsr & PSR_IT_MASK)) - return; - - cond = (cpsr & 0xe000) >> 13; - itbits = (cpsr & 0x1c00) >> (10 - 2); - itbits |= (cpsr & (0x3 << 25)) >> 25; - - /* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */ - if ((itbits & 0x7) == 0) - itbits = cond = 0; - else - itbits = (itbits << 1) & 0x1f; - - cpsr &= ~PSR_IT_MASK; - cpsr |= cond << 13; - cpsr |= (itbits & 0x1c) << (10 - 2); - cpsr |= (itbits & 0x3) << 25; - *vcpu_cpsr(vcpu) = cpsr; -} - -/** - * kvm_skip_instr - skip a trapped instruction and proceed to the next - * @vcpu: The vcpu pointer - */ -void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr) -{ - bool is_thumb; - - is_thumb = !!(*vcpu_cpsr(vcpu) & PSR_T_BIT); - if (is_thumb && !is_wide_instr) - *vcpu_pc(vcpu) += 2; - else - *vcpu_pc(vcpu) += 4; - kvm_adjust_itstate(vcpu); -} - - -/****************************************************************************** - * Inject exceptions into the guest - */ - -static u32 exc_vector_base(struct kvm_vcpu *vcpu) -{ - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - u32 vbar = vcpu->arch.cp15[c12_VBAR]; - - if (sctlr & SCTLR_V) - return 0xffff0000; - else /* always have security exceptions */ - return vbar; -} - -/** - * kvm_inject_undefined - inject an undefined exception into the guest - * @vcpu: The VCPU to receive the undefined exception - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. - * - * Modelled after TakeUndefInstrException() pseudocode. - */ -void kvm_inject_undefined(struct kvm_vcpu *vcpu) -{ - u32 new_lr_value; - u32 new_spsr_value; - u32 cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - bool is_thumb = (cpsr & PSR_T_BIT); - u32 vect_offset = 4; - u32 return_offset = (is_thumb) ? 2 : 4; - - new_spsr_value = cpsr; - new_lr_value = *vcpu_pc(vcpu) - return_offset; - - *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE; - *vcpu_cpsr(vcpu) |= PSR_I_BIT; - *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); - - if (sctlr & SCTLR_TE) - *vcpu_cpsr(vcpu) |= PSR_T_BIT; - if (sctlr & SCTLR_EE) - *vcpu_cpsr(vcpu) |= PSR_E_BIT; - - /* Note: These now point to UND banked copies */ - *vcpu_spsr(vcpu) = cpsr; - *vcpu_reg(vcpu, 14) = new_lr_value; - - /* Branch to exception vector */ - *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; -} - -/* - * Modelled after TakeDataAbortException() and TakePrefetchAbortException - * pseudocode. - */ -static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr) -{ - u32 new_lr_value; - u32 new_spsr_value; - u32 cpsr = *vcpu_cpsr(vcpu); - u32 sctlr = vcpu->arch.cp15[c1_SCTLR]; - bool is_thumb = (cpsr & PSR_T_BIT); - u32 vect_offset; - u32 return_offset = (is_thumb) ? 
4 : 0; - bool is_lpae; - - new_spsr_value = cpsr; - new_lr_value = *vcpu_pc(vcpu) + return_offset; - - *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE; - *vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT; - *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT); - - if (sctlr & SCTLR_TE) - *vcpu_cpsr(vcpu) |= PSR_T_BIT; - if (sctlr & SCTLR_EE) - *vcpu_cpsr(vcpu) |= PSR_E_BIT; - - /* Note: These now point to ABT banked copies */ - *vcpu_spsr(vcpu) = cpsr; - *vcpu_reg(vcpu, 14) = new_lr_value; - - if (is_pabt) - vect_offset = 12; - else - vect_offset = 16; - - /* Branch to exception vector */ - *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset; - - if (is_pabt) { - /* Set DFAR and DFSR */ - vcpu->arch.cp15[c6_IFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); - /* Always give debug fault for now - should give guest a clue */ - if (is_lpae) - vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22; - else - vcpu->arch.cp15[c5_IFSR] = 2; - } else { /* !iabt */ - /* Set DFAR and DFSR */ - vcpu->arch.cp15[c6_DFAR] = addr; - is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31); - /* Always give debug fault for now - should give guest a clue */ - if (is_lpae) - vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22; - else - vcpu->arch.cp15[c5_DFSR] = 2; - } - -} - -/** - * kvm_inject_dabt - inject a data abort into the guest - * @vcpu: The VCPU to receive the undefined exception - * @addr: The address to report in the DFAR - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. - */ -void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr) -{ - inject_abt(vcpu, false, addr); -} - -/** - * kvm_inject_pabt - inject a prefetch abort into the guest - * @vcpu: The VCPU to receive the undefined exception - * @addr: The address to report in the DFAR - * - * It is assumed that this code is called from the VCPU thread and that the - * VCPU therefore is not currently executing guest code. - */ -void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr) -{ - inject_abt(vcpu, true, addr); -} diff --git a/trunk/arch/arm/kvm/guest.c b/trunk/arch/arm/kvm/guest.c deleted file mode 100644 index 2339d9609d36..000000000000 --- a/trunk/arch/arm/kvm/guest.c +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
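A standalone sketch of the kvm_adjust_itstate() bit shuffling shown earlier, using the IT[7:0] -> CPSR[26:25],CPSR[15:10] mapping from its comment. The EX_PSR_* constants are derived from that mapping for the example and the sample CPSR value is made up.

#include <stdint.h>
#include <stdio.h>

#define EX_PSR_T_BIT   (1u << 5)
#define EX_PSR_IT_MASK ((0x3u << 25) | (0x3fu << 10))

static uint32_t itadvance(uint32_t cpsr)
{
	uint32_t itbits, cond;

	if (!(cpsr & EX_PSR_IT_MASK))
		return cpsr;                        /* not inside an IT block */

	cond = (cpsr & 0xe000) >> 13;               /* condition base, CPSR[15:13] */
	itbits = (cpsr & 0x1c00) >> (10 - 2);       /* CPSR[12:10] -> bits [4:2] */
	itbits |= (cpsr & (0x3u << 25)) >> 25;      /* CPSR[26:25] -> bits [1:0] */

	/* ITAdvance: shift the mask left; when it runs out, leave the block. */
	if ((itbits & 0x7) == 0)
		itbits = cond = 0;
	else
		itbits = (itbits << 1) & 0x1f;

	cpsr &= ~EX_PSR_IT_MASK;
	cpsr |= cond << 13;
	cpsr |= (itbits & 0x1c) << (10 - 2);
	cpsr |= (itbits & 0x3) << 25;
	return cpsr;
}

int main(void)
{
	uint32_t cpsr = EX_PSR_T_BIT | (0x5u << 13) | (0x2u << 10);

	printf("before: %08x after: %08x\n", cpsr, itadvance(cpsr));
	return 0;
}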
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define VM_STAT(x) { #x, offsetof(struct kvm, stat.x), KVM_STAT_VM } -#define VCPU_STAT(x) { #x, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU } - -struct kvm_stats_debugfs_item debugfs_entries[] = { - { NULL } -}; - -int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) -{ - return 0; -} - -static u64 core_reg_offset_from_id(u64 id) -{ - return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE); -} - -static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; - u64 off; - - if (KVM_REG_SIZE(reg->id) != 4) - return -ENOENT; - - /* Our ID is an index into the kvm_regs struct. */ - off = core_reg_offset_from_id(reg->id); - if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) - return -ENOENT; - - return put_user(((u32 *)regs)[off], uaddr); -} - -static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - u32 __user *uaddr = (u32 __user *)(long)reg->addr; - struct kvm_regs *regs = &vcpu->arch.regs; - u64 off, val; - - if (KVM_REG_SIZE(reg->id) != 4) - return -ENOENT; - - /* Our ID is an index into the kvm_regs struct. */ - off = core_reg_offset_from_id(reg->id); - if (off >= sizeof(*regs) / KVM_REG_SIZE(reg->id)) - return -ENOENT; - - if (get_user(val, uaddr) != 0) - return -EFAULT; - - if (off == KVM_REG_ARM_CORE_REG(usr_regs.ARM_cpsr)) { - unsigned long mode = val & MODE_MASK; - switch (mode) { - case USR_MODE: - case FIQ_MODE: - case IRQ_MODE: - case SVC_MODE: - case ABT_MODE: - case UND_MODE: - break; - default: - return -EINVAL; - } - } - - ((u32 *)regs)[off] = val; - return 0; -} - -int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) -{ - return -EINVAL; -} - -static unsigned long num_core_regs(void) -{ - return sizeof(struct kvm_regs) / sizeof(u32); -} - -/** - * kvm_arm_num_regs - how many registers do we present via KVM_GET_ONE_REG - * - * This is for all registers. - */ -unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu) -{ - return num_core_regs() + kvm_arm_num_coproc_regs(vcpu); -} - -/** - * kvm_arm_copy_reg_indices - get indices of all registers. - * - * We do core registers right here, then we apppend coproc regs. - */ -int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) -{ - unsigned int i; - const u64 core_reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_CORE; - - for (i = 0; i < sizeof(struct kvm_regs)/sizeof(u32); i++) { - if (put_user(core_reg | i, uindices)) - return -EFAULT; - uindices++; - } - - return kvm_arm_copy_coproc_indices(vcpu, uindices); -} - -int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - /* We currently use nothing arch-specific in upper 32 bits */ - if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) - return -EINVAL; - - /* Register group 16 means we want a core register. */ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return get_core_reg(vcpu, reg); - - return kvm_arm_coproc_get_reg(vcpu, reg); -} - -int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) -{ - /* We currently use nothing arch-specific in upper 32 bits */ - if ((reg->id & ~KVM_REG_SIZE_MASK) >> 32 != KVM_REG_ARM >> 32) - return -EINVAL; - - /* Register group 16 means we set a core register. 
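A sketch of the get_core_reg()/set_core_reg() trick above: the low bits of the register id are an index into struct kvm_regs viewed as a flat array of u32, bounds-checked against the struct size. The struct below is a stand-in, not the real kvm_regs layout.

#include <stdint.h>
#include <stdio.h>

struct ex_regs {
	uint32_t usr_regs[16];
	uint32_t cpsr;
	uint32_t svc_regs[3];
};

static int ex_get_core_reg(const struct ex_regs *regs, uint64_t off,
			   uint32_t *out)
{
	if (off >= sizeof(*regs) / sizeof(uint32_t))
		return -1;                       /* index past the struct: reject */

	*out = ((const uint32_t *)regs)[off];    /* flat u32 view of the struct */
	return 0;
}

int main(void)
{
	struct ex_regs regs = { .cpsr = 0x1d3 }; /* illustrative value */
	uint32_t val;

	if (ex_get_core_reg(&regs, 16, &val) == 0)  /* word offset 16 == cpsr here */
		printf("cpsr = 0x%x\n", val);
	return 0;
}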
*/ - if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_CORE) - return set_core_reg(vcpu, reg); - - return kvm_arm_coproc_set_reg(vcpu, reg); -} - -int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, - struct kvm_sregs *sregs) -{ - return -EINVAL; -} - -int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, - const struct kvm_vcpu_init *init) -{ - unsigned int i; - - /* We can only do a cortex A15 for now. */ - if (init->target != kvm_target_cpu()) - return -EINVAL; - - vcpu->arch.target = init->target; - bitmap_zero(vcpu->arch.features, KVM_VCPU_MAX_FEATURES); - - /* -ENOENT for unknown features, -EINVAL for invalid combinations. */ - for (i = 0; i < sizeof(init->features) * 8; i++) { - if (test_bit(i, (void *)init->features)) { - if (i >= KVM_VCPU_MAX_FEATURES) - return -ENOENT; - set_bit(i, vcpu->arch.features); - } - } - - /* Now we know what it is, we can reset it. */ - return kvm_reset_vcpu(vcpu); -} - -int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) -{ - return -EINVAL; -} - -int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, - struct kvm_translation *tr) -{ - return -EINVAL; -} diff --git a/trunk/arch/arm/kvm/init.S b/trunk/arch/arm/kvm/init.S deleted file mode 100644 index 9f37a79b880b..000000000000 --- a/trunk/arch/arm/kvm/init.S +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include - -/******************************************************************** - * Hypervisor initialization - * - should be called with: - * r0,r1 = Hypervisor pgd pointer - * r2 = top of Hyp stack (kernel VA) - * r3 = pointer to hyp vectors - */ - - .text - .pushsection .hyp.idmap.text,"ax" - .align 5 -__kvm_hyp_init: - .globl __kvm_hyp_init - - @ Hyp-mode exception vector - W(b) . - W(b) . - W(b) . - W(b) . - W(b) . - W(b) __do_hyp_init - W(b) . - W(b) . - -__do_hyp_init: - @ Set the HTTBR to point to the hypervisor PGD pointer passed - mcrr p15, 4, r0, r1, c2 - - @ Set the HTCR and VTCR to the same shareability and cacheability - @ settings as the non-secure TTBCR and with T0SZ == 0. 
- mrc p15, 4, r0, c2, c0, 2 @ HTCR - ldr r12, =HTCR_MASK - bic r0, r0, r12 - mrc p15, 0, r1, c2, c0, 2 @ TTBCR - and r1, r1, #(HTCR_MASK & ~TTBCR_T0SZ) - orr r0, r0, r1 - mcr p15, 4, r0, c2, c0, 2 @ HTCR - - mrc p15, 4, r1, c2, c1, 2 @ VTCR - ldr r12, =VTCR_MASK - bic r1, r1, r12 - bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits - orr r1, r0, r1 - orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S) - mcr p15, 4, r1, c2, c1, 2 @ VTCR - - @ Use the same memory attributes for hyp. accesses as the kernel - @ (copy MAIRx ro HMAIRx). - mrc p15, 0, r0, c10, c2, 0 - mcr p15, 4, r0, c10, c2, 0 - mrc p15, 0, r0, c10, c2, 1 - mcr p15, 4, r0, c10, c2, 1 - - @ Set the HSCTLR to: - @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) - @ - Endianness: Kernel config - @ - Fast Interrupt Features: Kernel config - @ - Write permission implies XN: disabled - @ - Instruction cache: enabled - @ - Data/Unified cache: enabled - @ - Memory alignment checks: enabled - @ - MMU: enabled (this code must be run from an identity mapping) - mrc p15, 4, r0, c1, c0, 0 @ HSCR - ldr r12, =HSCTLR_MASK - bic r0, r0, r12 - mrc p15, 0, r1, c1, c0, 0 @ SCTLR - ldr r12, =(HSCTLR_EE | HSCTLR_FI | HSCTLR_I | HSCTLR_C) - and r1, r1, r12 - ARM( ldr r12, =(HSCTLR_M | HSCTLR_A) ) - THUMB( ldr r12, =(HSCTLR_M | HSCTLR_A | HSCTLR_TE) ) - orr r1, r1, r12 - orr r0, r0, r1 - isb - mcr p15, 4, r0, c1, c0, 0 @ HSCR - isb - - @ Set stack pointer and return to the kernel - mov sp, r2 - - @ Set HVBAR to point to the HYP vectors - mcr p15, 4, r3, c12, c0, 0 @ HVBAR - - eret - - .ltorg - - .globl __kvm_hyp_init_end -__kvm_hyp_init_end: - - .popsection diff --git a/trunk/arch/arm/kvm/interrupts.S b/trunk/arch/arm/kvm/interrupts.S deleted file mode 100644 index c5400d2e97ca..000000000000 --- a/trunk/arch/arm/kvm/interrupts.S +++ /dev/null @@ -1,478 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "interrupts_head.S" - - .text - -__kvm_hyp_code_start: - .globl __kvm_hyp_code_start - -/******************************************************************** - * Flush per-VMID TLBs - * - * void __kvm_tlb_flush_vmid(struct kvm *kvm); - * - * We rely on the hardware to broadcast the TLB invalidation to all CPUs - * inside the inner-shareable domain (which is the case for all v7 - * implementations). If we come across a non-IS SMP implementation, we'll - * have to use an IPI based mechanism. Until then, we stick to the simple - * hardware assisted version. 
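A simplified C sketch of the read-modify-write pattern used by __do_hyp_init above when it builds HTCR from TTBCR: clear the bits covered by a mask in the Hyp register, then import the matching bits from the kernel's register. It ignores the T0SZ carve-out, and EX_HTCR_MASK is a placeholder value, not the real HTCR_MASK.

#include <stdint.h>
#include <stdio.h>

#define EX_HTCR_MASK 0x70000007u   /* placeholder: bits taken from TTBCR */

static uint32_t merge_htcr(uint32_t htcr, uint32_t ttbcr)
{
	htcr &= ~EX_HTCR_MASK;             /* drop the configurable bits */
	htcr |= ttbcr & EX_HTCR_MASK;      /* copy them from the kernel setting */
	return htcr;
}

int main(void)
{
	printf("HTCR = 0x%08x\n", merge_htcr(0x80000000u, 0x35003507u));
	return 0;
}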
- */ -ENTRY(__kvm_tlb_flush_vmid) - push {r2, r3} - - add r0, r0, #KVM_VTTBR - ldrd r2, r3, [r0] - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - isb - mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored) - dsb - isb - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Back to VMID #0 - isb @ Not necessary if followed by eret - - pop {r2, r3} - bx lr -ENDPROC(__kvm_tlb_flush_vmid) - -/******************************************************************** - * Flush TLBs and instruction caches of all CPUs inside the inner-shareable - * domain, for all VMIDs - * - * void __kvm_flush_vm_context(void); - */ -ENTRY(__kvm_flush_vm_context) - mov r0, #0 @ rn parameter for c15 flushes is SBZ - - /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */ - mcr p15, 4, r0, c8, c3, 4 - /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */ - mcr p15, 0, r0, c7, c1, 0 - dsb - isb @ Not necessary if followed by eret - - bx lr -ENDPROC(__kvm_flush_vm_context) - - -/******************************************************************** - * Hypervisor world-switch code - * - * - * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) - */ -ENTRY(__kvm_vcpu_run) - @ Save the vcpu pointer - mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR - - save_host_regs - - @ Store hardware CP15 state and load guest state - read_cp15_state store_to_vcpu = 0 - write_cp15_state read_from_vcpu = 1 - - @ If the host kernel has not been configured with VFPv3 support, - @ then it is safer if we deny guests from using it as well. -#ifdef CONFIG_VFPv3 - @ Set FPEXC_EN so the guest doesn't trap floating point instructions - VFPFMRX r2, FPEXC @ VMRS - push {r2} - orr r2, r2, #FPEXC_EN - VFPFMXR FPEXC, r2 @ VMSR -#endif - - @ Configure Hyp-role - configure_hyp_role vmentry - - @ Trap coprocessor CRx accesses - set_hstr vmentry - set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) - set_hdcr vmentry - - @ Write configured ID register into MIDR alias - ldr r1, [vcpu, #VCPU_MIDR] - mcr p15, 4, r1, c0, c0, 0 - - @ Write guest view of MPIDR into VMPIDR - ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] - mcr p15, 4, r1, c0, c0, 5 - - @ Set up guest memory translation - ldr r1, [vcpu, #VCPU_KVM] - add r1, r1, #KVM_VTTBR - ldrd r2, r3, [r1] - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - - @ We're all done, just restore the GPRs and go to the guest - restore_guest_regs - clrex @ Clear exclusive monitor - eret - -__kvm_vcpu_return: - /* - * return convention: - * guest r0, r1, r2 saved on the stack - * r0: vcpu pointer - * r1: exception code - */ - save_guest_regs - - @ Set VMID == 0 - mov r2, #0 - mov r3, #0 - mcrr p15, 6, r2, r3, c2 @ Write VTTBR - - @ Don't trap coprocessor accesses for host kernel - set_hstr vmexit - set_hdcr vmexit - set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) - -#ifdef CONFIG_VFPv3 - @ Save floating point registers we if let guest use them. 
- tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) - bne after_vfp_restore - - @ Switch VFP/NEON hardware state to the host's - add r7, vcpu, #VCPU_VFP_GUEST - store_vfp_state r7 - add r7, vcpu, #VCPU_VFP_HOST - ldr r7, [r7] - restore_vfp_state r7 - -after_vfp_restore: - @ Restore FPEXC_EN which we clobbered on entry - pop {r2} - VFPFMXR FPEXC, r2 -#endif - - @ Reset Hyp-role - configure_hyp_role vmexit - - @ Let host read hardware MIDR - mrc p15, 0, r2, c0, c0, 0 - mcr p15, 4, r2, c0, c0, 0 - - @ Back to hardware MPIDR - mrc p15, 0, r2, c0, c0, 5 - mcr p15, 4, r2, c0, c0, 5 - - @ Store guest CP15 state and restore host state - read_cp15_state store_to_vcpu = 1 - write_cp15_state read_from_vcpu = 0 - - restore_host_regs - clrex @ Clear exclusive monitor - mov r0, r1 @ Return the return code - mov r1, #0 @ Clear upper bits in return value - bx lr @ return to IOCTL - -/******************************************************************** - * Call function in Hyp mode - * - * - * u64 kvm_call_hyp(void *hypfn, ...); - * - * This is not really a variadic function in the classic C-way and care must - * be taken when calling this to ensure parameters are passed in registers - * only, since the stack will change between the caller and the callee. - * - * Call the function with the first argument containing a pointer to the - * function you wish to call in Hyp mode, and subsequent arguments will be - * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the - * function pointer can be passed). The function being called must be mapped - * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are - * passed in r0 and r1. - * - * The calling convention follows the standard AAPCS: - * r0 - r3: caller save - * r12: caller save - * rest: callee save - */ -ENTRY(kvm_call_hyp) - hvc #0 - bx lr - -/******************************************************************** - * Hypervisor exception vector and handlers - * - * - * The KVM/ARM Hypervisor ABI is defined as follows: - * - * Entry to Hyp mode from the host kernel will happen _only_ when an HVC - * instruction is issued since all traps are disabled when running the host - * kernel as per the Hyp-mode initialization at boot time. - * - * HVC instructions cause a trap to the vector page + offset 0x18 (see hyp_hvc - * below) when the HVC instruction is called from SVC mode (i.e. a guest or the - * host kernel) and they cause a trap to the vector page + offset 0xc when HVC - * instructions are called from within Hyp-mode. - * - * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): - * Switching to Hyp mode is done through a simple HVC #0 instruction. The - * exception vector code will check that the HVC comes from VMID==0 and if - * so will push the necessary state (SPSR, lr_usr) on the Hyp stack. - * - r0 contains a pointer to a HYP function - * - r1, r2, and r3 contain arguments to the above function. - * - The HYP function will be called with its arguments in r0, r1 and r2. - * On HYP function return, we return directly to SVC. - * - * Note that the above is used to execute code in Hyp-mode from a host-kernel - * point of view, and is a different concept from performing a world-switch and - * executing guest code SVC mode (with a VMID != 0). 
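A userspace sketch of the kvm_call_hyp() calling convention described above: the first argument is the function to run in Hyp mode, and up to three further arguments are forwarded in registers. The HVC trap is replaced here by a direct call so the argument shuffling is visible; everything in it is illustrative.

#include <stdio.h>

typedef unsigned long (*hyp_fn_t)(unsigned long, unsigned long, unsigned long);

static unsigned long fake_call_hyp(hyp_fn_t fn, unsigned long a0,
				   unsigned long a1, unsigned long a2)
{
	/* In the real code this is "hvc #0"; the Hyp vector moves the function
	 * pointer out of r0 and shifts the remaining arguments down. */
	return fn(a0, a1, a2);
}

static unsigned long demo_fn(unsigned long a, unsigned long b, unsigned long c)
{
	return a + b + c;
}

int main(void)
{
	printf("result = %lu\n", fake_call_hyp(demo_fn, 1, 2, 3));
	return 0;
}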
- */ - -/* Handle undef, svc, pabt, or dabt by crashing with a user notice */ -.macro bad_exception exception_code, panic_str - push {r0-r2} - mrrc p15, 6, r0, r1, c2 @ Read VTTBR - lsr r1, r1, #16 - ands r1, r1, #0xff - beq 99f - - load_vcpu @ Load VCPU pointer - .if \exception_code == ARM_EXCEPTION_DATA_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 0 @ HDFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - .if \exception_code == ARM_EXCEPTION_PREF_ABORT - mrc p15, 4, r2, c5, c2, 0 @ HSR - mrc p15, 4, r1, c6, c0, 2 @ HIFAR - str r2, [vcpu, #VCPU_HSR] - str r1, [vcpu, #VCPU_HxFAR] - .endif - mov r1, #\exception_code - b __kvm_vcpu_return - - @ We were in the host already. Let's craft a panic-ing return to SVC. -99: mrs r2, cpsr - bic r2, r2, #MODE_MASK - orr r2, r2, #SVC_MODE -THUMB( orr r2, r2, #PSR_T_BIT ) - msr spsr_cxsf, r2 - mrs r1, ELR_hyp - ldr r2, =BSYM(panic) - msr ELR_hyp, r2 - ldr r0, =\panic_str - eret -.endm - - .text - - .align 5 -__kvm_hyp_vector: - .globl __kvm_hyp_vector - - @ Hyp-mode exception vector - W(b) hyp_reset - W(b) hyp_undef - W(b) hyp_svc - W(b) hyp_pabt - W(b) hyp_dabt - W(b) hyp_hvc - W(b) hyp_irq - W(b) hyp_fiq - - .align -hyp_reset: - b hyp_reset - - .align -hyp_undef: - bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str - - .align -hyp_svc: - bad_exception ARM_EXCEPTION_HVC, svc_die_str - - .align -hyp_pabt: - bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str - - .align -hyp_dabt: - bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str - - .align -hyp_hvc: - /* - * Getting here is either becuase of a trap from a guest or from calling - * HVC from the host kernel, which means "switch to Hyp mode". - */ - push {r0, r1, r2} - - @ Check syndrome register - mrc p15, 4, r1, c5, c2, 0 @ HSR - lsr r0, r1, #HSR_EC_SHIFT -#ifdef CONFIG_VFPv3 - cmp r0, #HSR_EC_CP_0_13 - beq switch_to_guest_vfp -#endif - cmp r0, #HSR_EC_HVC - bne guest_trap @ Not HVC instr. - - /* - * Let's check if the HVC came from VMID 0 and allow simple - * switch to Hyp mode - */ - mrrc p15, 6, r0, r2, c2 - lsr r2, r2, #16 - and r2, r2, #0xff - cmp r2, #0 - bne guest_trap @ Guest called HVC - -host_switch_to_hyp: - pop {r0, r1, r2} - - push {lr} - mrs lr, SPSR - push {lr} - - mov lr, r0 - mov r0, r1 - mov r1, r2 - mov r2, r3 - -THUMB( orr lr, #1) - blx lr @ Call the HYP function - - pop {lr} - msr SPSR_csxf, lr - pop {lr} - eret - -guest_trap: - load_vcpu @ Load VCPU pointer to r0 - str r1, [vcpu, #VCPU_HSR] - - @ Check if we need the fault information - lsr r1, r1, #HSR_EC_SHIFT - cmp r1, #HSR_EC_IABT - mrceq p15, 4, r2, c6, c0, 2 @ HIFAR - beq 2f - cmp r1, #HSR_EC_DABT - bne 1f - mrc p15, 4, r2, c6, c0, 0 @ HDFAR - -2: str r2, [vcpu, #VCPU_HxFAR] - - /* - * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: - * - * Abort on the stage 2 translation for a memory access from a - * Non-secure PL1 or PL0 mode: - * - * For any Access flag fault or Translation fault, and also for any - * Permission fault on the stage 2 translation of a memory access - * made as part of a translation table walk for a stage 1 translation, - * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR - * is UNKNOWN. 
- */ - - /* Check for permission fault, and S1PTW */ - mrc p15, 4, r1, c5, c2, 0 @ HSR - and r0, r1, #HSR_FSC_TYPE - cmp r0, #FSC_PERM - tsteq r1, #(1 << 7) @ S1PTW - mrcne p15, 4, r2, c6, c0, 4 @ HPFAR - bne 3f - - /* Resolve IPA using the xFAR */ - mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR - isb - mrrc p15, 0, r0, r1, c7 @ PAR - tst r0, #1 - bne 4f @ Failed translation - ubfx r2, r0, #12, #20 - lsl r2, r2, #4 - orr r2, r2, r1, lsl #24 - -3: load_vcpu @ Load VCPU pointer to r0 - str r2, [r0, #VCPU_HPFAR] - -1: mov r1, #ARM_EXCEPTION_HVC - b __kvm_vcpu_return - -4: pop {r0, r1, r2} @ Failed translation, return to guest - eret - -/* - * If VFPv3 support is not available, then we will not switch the VFP - * registers; however cp10 and cp11 accesses will still trap and fallback - * to the regular coprocessor emulation code, which currently will - * inject an undefined exception to the guest. - */ -#ifdef CONFIG_VFPv3 -switch_to_guest_vfp: - load_vcpu @ Load VCPU pointer to r0 - push {r3-r7} - - @ NEON/VFP used. Turn on VFP access. - set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11)) - - @ Switch VFP/NEON hardware state to the guest's - add r7, r0, #VCPU_VFP_HOST - ldr r7, [r7] - store_vfp_state r7 - add r7, r0, #VCPU_VFP_GUEST - restore_vfp_state r7 - - pop {r3-r7} - pop {r0-r2} - eret -#endif - - .align -hyp_irq: - push {r0, r1, r2} - mov r1, #ARM_EXCEPTION_IRQ - load_vcpu @ Load VCPU pointer to r0 - b __kvm_vcpu_return - - .align -hyp_fiq: - b hyp_fiq - - .ltorg - -__kvm_hyp_code_end: - .globl __kvm_hyp_code_end - - .section ".rodata" - -und_die_str: - .ascii "unexpected undefined exception in Hyp mode at: %#08x" -pabt_die_str: - .ascii "unexpected prefetch abort in Hyp mode at: %#08x" -dabt_die_str: - .ascii "unexpected data abort in Hyp mode at: %#08x" -svc_die_str: - .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x" diff --git a/trunk/arch/arm/kvm/interrupts_head.S b/trunk/arch/arm/kvm/interrupts_head.S deleted file mode 100644 index 6a95d341e9c5..000000000000 --- a/trunk/arch/arm/kvm/interrupts_head.S +++ /dev/null @@ -1,441 +0,0 @@ -#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4)) -#define VCPU_USR_SP (VCPU_USR_REG(13)) -#define VCPU_USR_LR (VCPU_USR_REG(14)) -#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4)) - -/* - * Many of these macros need to access the VCPU structure, which is always - * held in r0. These macros should never clobber r1, as it is used to hold the - * exception code on the return path (except of course the macro that switches - * all the registers before the final jump to the VM). - */ -vcpu .req r0 @ vcpu pointer always in r0 - -/* Clobbers {r2-r6} */ -.macro store_vfp_state vfp_base - @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions - VFPFMRX r2, FPEXC - @ Make sure VFP is enabled so we can touch the registers. - orr r6, r2, #FPEXC_EN - VFPFMXR FPEXC, r6 - - VFPFMRX r3, FPSCR - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are upredictable, so - @ we only need to save them if FPEXC_EX is set. 
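A sketch of the ubfx/lsl/orr sequence above that repacks a 64-bit PAR value into the HPFAR layout, with IPA bits [39:12] stored at register bits [31:4]. The input address is made up for the example.

#include <stdint.h>
#include <stdio.h>

static uint32_t par_to_hpfar(uint64_t par)
{
	uint32_t lo = (uint32_t)par;           /* PAR[31:0]  */
	uint32_t hi = (uint32_t)(par >> 32);   /* PAR[63:32] */
	uint32_t hpfar;

	hpfar = ((lo >> 12) & 0xfffff) << 4;   /* ubfx+lsl: PA[31:12] -> bits [23:4] */
	hpfar |= hi << 24;                     /* orr: PA[39:32] -> bits [31:24] */
	return hpfar;
}

int main(void)
{
	uint64_t par = 0x0000000312345000ull;  /* example 40-bit physical address */

	printf("HPFAR = 0x%08x\n", par_to_hpfar(par));
	return 0;
}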
- VFPFMRX r4, FPINST - tst r2, #FPEXC_FP2V - VFPFMRX r5, FPINST2, ne @ vmrsne - bic r6, r2, #FPEXC_EX @ FPEXC_EX disable - VFPFMXR FPEXC, r6 -1: - VFPFSTMIA \vfp_base, r6 @ Save VFP registers - stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2 -.endm - -/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */ -.macro restore_vfp_state vfp_base - VFPFLDMIA \vfp_base, r6 @ Load VFP registers - ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2 - - VFPFMXR FPSCR, r3 - tst r2, #FPEXC_EX @ Check for VFP Subarchitecture - beq 1f - VFPFMXR FPINST, r4 - tst r2, #FPEXC_FP2V - VFPFMXR FPINST2, r5, ne -1: - VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN) -.endm - -/* These are simply for the macros to work - value don't have meaning */ -.equ usr, 0 -.equ svc, 1 -.equ abt, 2 -.equ und, 3 -.equ irq, 4 -.equ fiq, 5 - -.macro push_host_regs_mode mode - mrs r2, SP_\mode - mrs r3, LR_\mode - mrs r4, SPSR_\mode - push {r2, r3, r4} -.endm - -/* - * Store all host persistent registers on the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro save_host_regs - /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */ - mrs r2, ELR_hyp - push {r2} - - /* usr regs */ - push {r4-r12} @ r0-r3 are always clobbered - mrs r2, SP_usr - mov r3, lr - push {r2, r3} - - push_host_regs_mode svc - push_host_regs_mode abt - push_host_regs_mode und - push_host_regs_mode irq - - /* fiq regs */ - mrs r2, r8_fiq - mrs r3, r9_fiq - mrs r4, r10_fiq - mrs r5, r11_fiq - mrs r6, r12_fiq - mrs r7, SP_fiq - mrs r8, LR_fiq - mrs r9, SPSR_fiq - push {r2-r9} -.endm - -.macro pop_host_regs_mode mode - pop {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all host registers from the stack. - * Clobbers all registers, in all modes, except r0 and r1. - */ -.macro restore_host_regs - pop {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - pop_host_regs_mode irq - pop_host_regs_mode und - pop_host_regs_mode abt - pop_host_regs_mode svc - - pop {r2, r3} - msr SP_usr, r2 - mov lr, r3 - pop {r4-r12} - - pop {r2} - msr ELR_hyp, r2 -.endm - -/* - * Restore SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r1, r2, r3, r4. - */ -.macro restore_guest_regs_mode mode, offset - add r1, vcpu, \offset - ldm r1, {r2, r3, r4} - msr SP_\mode, r2 - msr LR_\mode, r3 - msr SPSR_\mode, r4 -.endm - -/* - * Restore all guest registers from the vcpu struct. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers *all* registers. - */ -.macro restore_guest_regs - restore_guest_regs_mode svc, #VCPU_SVC_REGS - restore_guest_regs_mode abt, #VCPU_ABT_REGS - restore_guest_regs_mode und, #VCPU_UND_REGS - restore_guest_regs_mode irq, #VCPU_IRQ_REGS - - add r1, vcpu, #VCPU_FIQ_REGS - ldm r1, {r2-r9} - msr r8_fiq, r2 - msr r9_fiq, r3 - msr r10_fiq, r4 - msr r11_fiq, r5 - msr r12_fiq, r6 - msr SP_fiq, r7 - msr LR_fiq, r8 - msr SPSR_fiq, r9 - - @ Load return state - ldr r2, [vcpu, #VCPU_PC] - ldr r3, [vcpu, #VCPU_CPSR] - msr ELR_hyp, r2 - msr SPSR_cxsf, r3 - - @ Load user registers - ldr r2, [vcpu, #VCPU_USR_SP] - ldr r3, [vcpu, #VCPU_USR_LR] - msr SP_usr, r2 - mov lr, r3 - add vcpu, vcpu, #(VCPU_USR_REGS) - ldm vcpu, {r0-r12} -.endm - -/* - * Save SP, LR and SPSR for a given mode. offset is the offset of - * this mode's registers from the VCPU base. 
- * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs_mode mode, offset - add r2, vcpu, \offset - mrs r3, SP_\mode - mrs r4, LR_\mode - mrs r5, SPSR_\mode - stm r2, {r3, r4, r5} -.endm - -/* - * Save all guest registers to the vcpu struct - * Expects guest's r0, r1, r2 on the stack. - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2, r3, r4, r5. - */ -.macro save_guest_regs - @ Store usr registers - add r2, vcpu, #VCPU_USR_REG(3) - stm r2, {r3-r12} - add r2, vcpu, #VCPU_USR_REG(0) - pop {r3, r4, r5} @ r0, r1, r2 - stm r2, {r3, r4, r5} - mrs r2, SP_usr - mov r3, lr - str r2, [vcpu, #VCPU_USR_SP] - str r3, [vcpu, #VCPU_USR_LR] - - @ Store return state - mrs r2, ELR_hyp - mrs r3, spsr - str r2, [vcpu, #VCPU_PC] - str r3, [vcpu, #VCPU_CPSR] - - @ Store other guest registers - save_guest_regs_mode svc, #VCPU_SVC_REGS - save_guest_regs_mode abt, #VCPU_ABT_REGS - save_guest_regs_mode und, #VCPU_UND_REGS - save_guest_regs_mode irq, #VCPU_IRQ_REGS -.endm - -/* Reads cp15 registers from hardware and stores them in memory - * @store_to_vcpu: If 0, registers are written in-order to the stack, - * otherwise to the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - * - * Clobbers r2 - r12 - */ -.macro read_cp15_state store_to_vcpu - mrc p15, 0, r2, c1, c0, 0 @ SCTLR - mrc p15, 0, r3, c1, c0, 2 @ CPACR - mrc p15, 0, r4, c2, c0, 2 @ TTBCR - mrc p15, 0, r5, c3, c0, 0 @ DACR - mrrc p15, 0, r6, r7, c2 @ TTBR 0 - mrrc p15, 1, r8, r9, c2 @ TTBR 1 - mrc p15, 0, r10, c10, c2, 0 @ PRRR - mrc p15, 0, r11, c10, c2, 1 @ NMRR - mrc p15, 2, r12, c0, c0, 0 @ CSSELR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - str r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - str r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r2, vcpu, #CP15_OFFSET(c2_TTBR0) - strd r6, r7, [r2] - add r2, vcpu, #CP15_OFFSET(c2_TTBR1) - strd r8, r9, [r2] - str r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - str r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mrc p15, 0, r2, c13, c0, 1 @ CID - mrc p15, 0, r3, c13, c0, 2 @ TID_URW - mrc p15, 0, r4, c13, c0, 3 @ TID_URO - mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV - mrc p15, 0, r6, c5, c0, 0 @ DFSR - mrc p15, 0, r7, c5, c0, 1 @ IFSR - mrc p15, 0, r8, c5, c1, 0 @ ADFSR - mrc p15, 0, r9, c5, c1, 1 @ AIFSR - mrc p15, 0, r10, c6, c0, 0 @ DFAR - mrc p15, 0, r11, c6, c0, 2 @ IFAR - mrc p15, 0, r12, c12, c0, 0 @ VBAR - - .if \store_to_vcpu == 0 - push {r2-r12} @ Push CP15 registers - .else - str r2, [vcpu, #CP15_OFFSET(c13_CID)] - str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - str r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - str r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - str r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - str r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - str r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif -.endm - -/* - * Reads cp15 registers from memory and writes them to hardware - * @read_from_vcpu: If 0, registers are read in-order from the stack, - * otherwise from the VCPU struct pointed to by vcpup - * - * Assumes vcpu pointer in vcpu reg - */ -.macro write_cp15_state read_from_vcpu - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c13_CID)] - ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)] - ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)] - 
ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)] - ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)] - ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)] - ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)] - ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)] - ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)] - ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)] - ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)] - .endif - - mcr p15, 0, r2, c13, c0, 1 @ CID - mcr p15, 0, r3, c13, c0, 2 @ TID_URW - mcr p15, 0, r4, c13, c0, 3 @ TID_URO - mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV - mcr p15, 0, r6, c5, c0, 0 @ DFSR - mcr p15, 0, r7, c5, c0, 1 @ IFSR - mcr p15, 0, r8, c5, c1, 0 @ ADFSR - mcr p15, 0, r9, c5, c1, 1 @ AIFSR - mcr p15, 0, r10, c6, c0, 0 @ DFAR - mcr p15, 0, r11, c6, c0, 2 @ IFAR - mcr p15, 0, r12, c12, c0, 0 @ VBAR - - .if \read_from_vcpu == 0 - pop {r2-r12} - .else - ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)] - ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)] - ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)] - ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)] - add r12, vcpu, #CP15_OFFSET(c2_TTBR0) - ldrd r6, r7, [r12] - add r12, vcpu, #CP15_OFFSET(c2_TTBR1) - ldrd r8, r9, [r12] - ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)] - ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)] - ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)] - .endif - - mcr p15, 0, r2, c1, c0, 0 @ SCTLR - mcr p15, 0, r3, c1, c0, 2 @ CPACR - mcr p15, 0, r4, c2, c0, 2 @ TTBCR - mcr p15, 0, r5, c3, c0, 0 @ DACR - mcrr p15, 0, r6, r7, c2 @ TTBR 0 - mcrr p15, 1, r8, r9, c2 @ TTBR 1 - mcr p15, 0, r10, c10, c2, 0 @ PRRR - mcr p15, 0, r11, c10, c2, 1 @ NMRR - mcr p15, 2, r12, c0, c0, 0 @ CSSELR -.endm - -/* - * Save the VGIC CPU state into memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro save_vgic_state -.endm - -/* - * Restore the VGIC CPU state from memory - * - * Assumes vcpu pointer in vcpu reg - */ -.macro restore_vgic_state -.endm - -.equ vmentry, 0 -.equ vmexit, 1 - -/* Configures the HSTR (Hyp System Trap Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hstr operation - mrc p15, 4, r2, c1, c1, 3 - ldr r3, =HSTR_T(15) - .if \operation == vmentry - orr r2, r2, r3 @ Trap CR{15} - .else - bic r2, r2, r3 @ Don't trap any CRx accesses - .endif - mcr p15, 4, r2, c1, c1, 3 -.endm - -/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return - * (hardware reset value is 0). Keep previous value in r2. 
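Editor's note (sketch, not part of the patch): set_hstr above and the set_hcptr/set_hdcr macros that follow all share one read-modify-write shape -- read the Hyp trap register, OR the trap mask in on guest entry, BIC it back out on exit. A rough C rendering, with illustrative names:

	#include <linux/types.h>

	enum { VMENTRY = 0, VMEXIT = 1 };	/* mirrors .equ vmentry / vmexit */

	static inline u32 update_trap_reg(u32 old, u32 mask, int op)
	{
		if (op == VMENTRY)
			return old | mask;	/* trap these accesses while the guest runs */
		return old & ~mask;		/* stop trapping them on the way back out */
	}

set_hcptr additionally leaves the pre-modification value in r2, so the caller can still see which traps were already in force before the update.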
*/ -.macro set_hcptr operation, mask - mrc p15, 4, r2, c1, c1, 2 - ldr r3, =\mask - .if \operation == vmentry - orr r3, r2, r3 @ Trap coproc-accesses defined in mask - .else - bic r3, r2, r3 @ Don't trap defined coproc-accesses - .endif - mcr p15, 4, r3, c1, c1, 2 -.endm - -/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return - * (hardware reset value is 0) */ -.macro set_hdcr operation - mrc p15, 4, r2, c1, c1, 1 - ldr r3, =(HDCR_TPM|HDCR_TPMCR) - .if \operation == vmentry - orr r2, r2, r3 @ Trap some perfmon accesses - .else - bic r2, r2, r3 @ Don't trap any perfmon accesses - .endif - mcr p15, 4, r2, c1, c1, 1 -.endm - -/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */ -.macro configure_hyp_role operation - mrc p15, 4, r2, c1, c1, 0 @ HCR - bic r2, r2, #HCR_VIRT_EXCP_MASK - ldr r3, =HCR_GUEST_MASK - .if \operation == vmentry - orr r2, r2, r3 - ldr r3, [vcpu, #VCPU_IRQ_LINES] - orr r2, r2, r3 - .else - bic r2, r2, r3 - .endif - mcr p15, 4, r2, c1, c1, 0 -.endm - -.macro load_vcpu - mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR -.endm diff --git a/trunk/arch/arm/kvm/mmio.c b/trunk/arch/arm/kvm/mmio.c deleted file mode 100644 index 0144baf82904..000000000000 --- a/trunk/arch/arm/kvm/mmio.c +++ /dev/null @@ -1,153 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include - -#include "trace.h" - -/** - * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation - * @vcpu: The VCPU pointer - * @run: The VCPU run struct containing the mmio data - * - * This should only be called after returning from userspace for MMIO load - * emulation. 
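Editor's note (assumption-flagged sketch, not part of the patch): configure_hyp_role above is also the point where pending virtual interrupts reach the guest. The word loaded from [vcpu, #VCPU_IRQ_LINES] presumably carries HCR.VI/HCR.VF-style bits set by the interrupt-injection path outside this file, and OR-ing it into HCR on every entry is what asserts the virtual IRQ/FIQ lines. In outline:

	/* illustrative only -- the real code does this in assembly above;      */
	/* read_hcr()/write_hcr() and the irq_lines field name are assumptions */
	hcr  = read_hcr();
	hcr &= ~HCR_VIRT_EXCP_MASK;	/* clear stale virtual-exception bits   */
	hcr |= HCR_GUEST_MASK;		/* guest-mode traps, stage-2 translation */
	hcr |= vcpu->arch.irq_lines;	/* assert any pending vIRQ/vFIQ          */
	write_hcr(hcr);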
- */ -int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - __u32 *dest; - unsigned int len; - int mask; - - if (!run->mmio.is_write) { - dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt); - memset(dest, 0, sizeof(int)); - - len = run->mmio.len; - if (len > 4) - return -EINVAL; - - memcpy(dest, run->mmio.data, len); - - trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr, - *((u64 *)run->mmio.data)); - - if (vcpu->arch.mmio_decode.sign_extend && len < 4) { - mask = 1U << ((len * 8) - 1); - *dest = (*dest ^ mask) - mask; - } - } - - return 0; -} - -static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - struct kvm_exit_mmio *mmio) -{ - unsigned long rt, len; - bool is_write, sign_extend; - - if ((vcpu->arch.hsr >> 8) & 1) { - /* cache operation on I/O addr, tell guest unsupported */ - kvm_inject_dabt(vcpu, vcpu->arch.hxfar); - return 1; - } - - if ((vcpu->arch.hsr >> 7) & 1) { - /* page table accesses IO mem: tell guest to fix its TTBR */ - kvm_inject_dabt(vcpu, vcpu->arch.hxfar); - return 1; - } - - switch ((vcpu->arch.hsr >> 22) & 0x3) { - case 0: - len = 1; - break; - case 1: - len = 2; - break; - case 2: - len = 4; - break; - default: - kvm_err("Hardware is weird: SAS 0b11 is reserved\n"); - return -EFAULT; - } - - is_write = vcpu->arch.hsr & HSR_WNR; - sign_extend = vcpu->arch.hsr & HSR_SSE; - rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT; - - if (kvm_vcpu_reg_is_pc(vcpu, rt)) { - /* IO memory trying to read/write pc */ - kvm_inject_pabt(vcpu, vcpu->arch.hxfar); - return 1; - } - - mmio->is_write = is_write; - mmio->phys_addr = fault_ipa; - mmio->len = len; - vcpu->arch.mmio_decode.sign_extend = sign_extend; - vcpu->arch.mmio_decode.rt = rt; - - /* - * The MMIO instruction is emulated and should not be re-executed - * in the guest. - */ - kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1); - return 0; -} - -int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run, - phys_addr_t fault_ipa) -{ - struct kvm_exit_mmio mmio; - unsigned long rt; - int ret; - - /* - * Prepare MMIO operation. First stash it in a private - * structure that we can use for in-kernel emulation. If the - * kernel can't handle it, copy it into run->mmio and let user - * space do its magic. - */ - - if (vcpu->arch.hsr & HSR_ISV) { - ret = decode_hsr(vcpu, fault_ipa, &mmio); - if (ret) - return ret; - } else { - kvm_err("load/store instruction decoding not implemented\n"); - return -ENOSYS; - } - - rt = vcpu->arch.mmio_decode.rt; - trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE : - KVM_TRACE_MMIO_READ_UNSATISFIED, - mmio.len, fault_ipa, - (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0); - - if (mmio.is_write) - memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len); - - kvm_prepare_mmio(run, &mmio); - return 0; -} diff --git a/trunk/arch/arm/kvm/mmu.c b/trunk/arch/arm/kvm/mmu.c deleted file mode 100644 index f30e13163a96..000000000000 --- a/trunk/arch/arm/kvm/mmu.c +++ /dev/null @@ -1,787 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
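Editor's worked example (not from the patch): the XOR-then-subtract in kvm_handle_mmio_return above sign-extends a narrow MMIO load result to the full register. For a 1-byte load that returned 0xff with sign_extend set:

	mask = 1U << (1 * 8 - 1)  = 0x80
	(0xff ^ 0x80) - 0x80      = 0x7f - 0x80 = 0xffffffff   (-1 as a 32-bit value)

while a positive byte such as 0x7f comes back unchanged (0x7f ^ 0x80 = 0xff; 0xff - 0x80 = 0x7f). As a standalone helper:

	#include <linux/types.h>

	static u32 sign_extend_to_32(u32 val, unsigned int len_bytes)
	{
		u32 mask = 1U << (len_bytes * 8 - 1);	/* sign bit of the narrow value */

		return (val ^ mask) - mask;
	}

The length itself comes from the HSR SAS field decoded in decode_hsr: 0 means 1 byte, 1 means 2 bytes, 2 means 4 bytes, and 3 is reserved.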
- * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "trace.h" - -extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[]; - -static DEFINE_MUTEX(kvm_hyp_pgd_mutex); - -static void kvm_tlb_flush_vmid(struct kvm *kvm) -{ - kvm_call_hyp(__kvm_tlb_flush_vmid, kvm); -} - -static void kvm_set_pte(pte_t *pte, pte_t new_pte) -{ - pte_val(*pte) = new_pte; - /* - * flush_pmd_entry just takes a void pointer and cleans the necessary - * cache entries, so we can reuse the function for ptes. - */ - flush_pmd_entry(pte); -} - -static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, - int min, int max) -{ - void *page; - - BUG_ON(max > KVM_NR_MEM_OBJS); - if (cache->nobjs >= min) - return 0; - while (cache->nobjs < max) { - page = (void *)__get_free_page(PGALLOC_GFP); - if (!page) - return -ENOMEM; - cache->objects[cache->nobjs++] = page; - } - return 0; -} - -static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) -{ - while (mc->nobjs) - free_page((unsigned long)mc->objects[--mc->nobjs]); -} - -static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) -{ - void *p; - - BUG_ON(!mc || !mc->nobjs); - p = mc->objects[--mc->nobjs]; - return p; -} - -static void free_ptes(pmd_t *pmd, unsigned long addr) -{ - pte_t *pte; - unsigned int i; - - for (i = 0; i < PTRS_PER_PMD; i++, addr += PMD_SIZE) { - if (!pmd_none(*pmd) && pmd_table(*pmd)) { - pte = pte_offset_kernel(pmd, addr); - pte_free_kernel(NULL, pte); - } - pmd++; - } -} - -/** - * free_hyp_pmds - free a Hyp-mode level-2 tables and child level-3 tables - * - * Assumes this is a page table used strictly in Hyp-mode and therefore contains - * only mappings in the kernel memory area, which is above PAGE_OFFSET. 
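Editor's note (sketch, not part of the patch): the mmu_topup_memory_cache / mmu_memory_cache_alloc / mmu_free_memory_cache trio above exists so that page-table pages can be allocated while sleeping is still allowed, and then handed out under mmu_lock without sleeping. kvm_phys_addr_ioremap and user_mem_abort later in this file both follow the same two-phase shape:

	/* illustrative caller, as if placed in this file -- mirrors kvm_phys_addr_ioremap */
	static int map_one_page(struct kvm *kvm, struct kvm_mmu_memory_cache *cache,
				phys_addr_t addr, pte_t pte)
	{
		int ret;

		/* phase 1: refill the cache outside the lock (may sleep, may fail) */
		ret = mmu_topup_memory_cache(cache, 2, KVM_NR_MEM_OBJS);
		if (ret)
			return ret;

		/* phase 2: consume pre-allocated pages under mmu_lock (never sleeps) */
		spin_lock(&kvm->mmu_lock);
		ret = stage2_set_pte(kvm, cache, addr, &pte, false);
		spin_unlock(&kvm->mmu_lock);

		return ret;
	}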
- */ -void free_hyp_pmds(void) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - unsigned long addr; - - mutex_lock(&kvm_hyp_pgd_mutex); - for (addr = PAGE_OFFSET; addr != 0; addr += PGDIR_SIZE) { - pgd = hyp_pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); - - if (pud_none(*pud)) - continue; - BUG_ON(pud_bad(*pud)); - - pmd = pmd_offset(pud, addr); - free_ptes(pmd, addr); - pmd_free(NULL, pmd); - pud_clear(pud); - } - mutex_unlock(&kvm_hyp_pgd_mutex); -} - -static void create_hyp_pte_mappings(pmd_t *pmd, unsigned long start, - unsigned long end) -{ - pte_t *pte; - unsigned long addr; - struct page *page; - - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - pte = pte_offset_kernel(pmd, addr); - BUG_ON(!virt_addr_valid(addr)); - page = virt_to_page(addr); - kvm_set_pte(pte, mk_pte(page, PAGE_HYP)); - } -} - -static void create_hyp_io_pte_mappings(pmd_t *pmd, unsigned long start, - unsigned long end, - unsigned long *pfn_base) -{ - pte_t *pte; - unsigned long addr; - - for (addr = start & PAGE_MASK; addr < end; addr += PAGE_SIZE) { - pte = pte_offset_kernel(pmd, addr); - BUG_ON(pfn_valid(*pfn_base)); - kvm_set_pte(pte, pfn_pte(*pfn_base, PAGE_HYP_DEVICE)); - (*pfn_base)++; - } -} - -static int create_hyp_pmd_mappings(pud_t *pud, unsigned long start, - unsigned long end, unsigned long *pfn_base) -{ - pmd_t *pmd; - pte_t *pte; - unsigned long addr, next; - - for (addr = start; addr < end; addr = next) { - pmd = pmd_offset(pud, addr); - - BUG_ON(pmd_sect(*pmd)); - - if (pmd_none(*pmd)) { - pte = pte_alloc_one_kernel(NULL, addr); - if (!pte) { - kvm_err("Cannot allocate Hyp pte\n"); - return -ENOMEM; - } - pmd_populate_kernel(NULL, pmd, pte); - } - - next = pmd_addr_end(addr, end); - - /* - * If pfn_base is NULL, we map kernel pages into HYP with the - * virtual address. Otherwise, this is considered an I/O - * mapping and we map the physical region starting at - * *pfn_base to [start, end[. - */ - if (!pfn_base) - create_hyp_pte_mappings(pmd, addr, next); - else - create_hyp_io_pte_mappings(pmd, addr, next, pfn_base); - } - - return 0; -} - -static int __create_hyp_mappings(void *from, void *to, unsigned long *pfn_base) -{ - unsigned long start = (unsigned long)from; - unsigned long end = (unsigned long)to; - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - unsigned long addr, next; - int err = 0; - - BUG_ON(start > end); - if (start < PAGE_OFFSET) - return -EINVAL; - - mutex_lock(&kvm_hyp_pgd_mutex); - for (addr = start; addr < end; addr = next) { - pgd = hyp_pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); - - if (pud_none_or_clear_bad(pud)) { - pmd = pmd_alloc_one(NULL, addr); - if (!pmd) { - kvm_err("Cannot allocate Hyp pmd\n"); - err = -ENOMEM; - goto out; - } - pud_populate(NULL, pud, pmd); - } - - next = pgd_addr_end(addr, end); - err = create_hyp_pmd_mappings(pud, addr, next, pfn_base); - if (err) - goto out; - } -out: - mutex_unlock(&kvm_hyp_pgd_mutex); - return err; -} - -/** - * create_hyp_mappings - map a kernel virtual address range in Hyp mode - * @from: The virtual kernel start address of the range - * @to: The virtual kernel end address of the range (exclusive) - * - * The same virtual address as the kernel virtual address is also used in - * Hyp-mode mapping to the same underlying physical pages. - * - * Note: Wrapping around zero in the "to" address is not supported. 
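Editor's illustration (hypothetical callers, not shown in this patch): create_hyp_mappings, whose kerneldoc appears above, and create_hyp_io_mappings, defined just below, are the two thin wrappers over __create_hyp_mappings -- kernel-VA mappings when pfn_base is NULL, a physically contiguous device mapping otherwise. Typical use would look roughly like:

	/* map a kernel object into Hyp at its kernel virtual address */
	err = create_hyp_mappings(obj, (char *)obj + sizeof(*obj));

	/* map a device region (physical address dev_pa) at a chosen Hyp VA range */
	err = create_hyp_io_mappings(hyp_va, (char *)hyp_va + region_size, dev_pa);

obj, hyp_va, region_size and dev_pa are placeholders; the real callers live outside the files touched by this patch.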
- */ -int create_hyp_mappings(void *from, void *to) -{ - return __create_hyp_mappings(from, to, NULL); -} - -/** - * create_hyp_io_mappings - map a physical IO range in Hyp mode - * @from: The virtual HYP start address of the range - * @to: The virtual HYP end address of the range (exclusive) - * @addr: The physical start address which gets mapped - */ -int create_hyp_io_mappings(void *from, void *to, phys_addr_t addr) -{ - unsigned long pfn = __phys_to_pfn(addr); - return __create_hyp_mappings(from, to, &pfn); -} - -/** - * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation. - * @kvm: The KVM struct pointer for the VM. - * - * Allocates the 1st level table only of size defined by S2_PGD_ORDER (can - * support either full 40-bit input addresses or limited to 32-bit input - * addresses). Clears the allocated pages. - * - * Note we don't need locking here as this is only called when the VM is - * created, which can only be done once. - */ -int kvm_alloc_stage2_pgd(struct kvm *kvm) -{ - pgd_t *pgd; - - if (kvm->arch.pgd != NULL) { - kvm_err("kvm_arch already initialized?\n"); - return -EINVAL; - } - - pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, S2_PGD_ORDER); - if (!pgd) - return -ENOMEM; - - /* stage-2 pgd must be aligned to its size */ - VM_BUG_ON((unsigned long)pgd & (S2_PGD_SIZE - 1)); - - memset(pgd, 0, PTRS_PER_S2_PGD * sizeof(pgd_t)); - clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t)); - kvm->arch.pgd = pgd; - - return 0; -} - -static void clear_pud_entry(pud_t *pud) -{ - pmd_t *pmd_table = pmd_offset(pud, 0); - pud_clear(pud); - pmd_free(NULL, pmd_table); - put_page(virt_to_page(pud)); -} - -static void clear_pmd_entry(pmd_t *pmd) -{ - pte_t *pte_table = pte_offset_kernel(pmd, 0); - pmd_clear(pmd); - pte_free_kernel(NULL, pte_table); - put_page(virt_to_page(pmd)); -} - -static bool pmd_empty(pmd_t *pmd) -{ - struct page *pmd_page = virt_to_page(pmd); - return page_count(pmd_page) == 1; -} - -static void clear_pte_entry(pte_t *pte) -{ - if (pte_present(*pte)) { - kvm_set_pte(pte, __pte(0)); - put_page(virt_to_page(pte)); - } -} - -static bool pte_empty(pte_t *pte) -{ - struct page *pte_page = virt_to_page(pte); - return page_count(pte_page) == 1; -} - -/** - * unmap_stage2_range -- Clear stage2 page table entries to unmap a range - * @kvm: The VM pointer - * @start: The intermediate physical base address of the range to unmap - * @size: The size of the area to unmap - * - * Clear a range of stage-2 mappings, lowering the various ref-counts. Must - * be called while holding mmu_lock (unless for freeing the stage2 pgd before - * destroying the VM), otherwise another faulting VCPU may come in and mess - * with things behind our backs. 
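Editor's note (sketch, not part of the patch): the "ref-counts" mentioned in the comment above are plain page refcounts on the stage-2 table pages themselves. stage2_set_pte further down calls get_page() on a table page each time it populates an entry in it, and clear_pte_entry/clear_pmd_entry above drop one reference per entry they remove, so emptiness reduces to:

	/* what pmd_empty()/pte_empty() above express, written out */
	static bool stage2_table_is_empty(void *table)
	{
		/* only the original allocation reference is left */
		return page_count(virt_to_page(table)) == 1;
	}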
- */ -static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte; - phys_addr_t addr = start, end = start + size; - u64 range; - - while (addr < end) { - pgd = kvm->arch.pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); - if (pud_none(*pud)) { - addr += PUD_SIZE; - continue; - } - - pmd = pmd_offset(pud, addr); - if (pmd_none(*pmd)) { - addr += PMD_SIZE; - continue; - } - - pte = pte_offset_kernel(pmd, addr); - clear_pte_entry(pte); - range = PAGE_SIZE; - - /* If we emptied the pte, walk back up the ladder */ - if (pte_empty(pte)) { - clear_pmd_entry(pmd); - range = PMD_SIZE; - if (pmd_empty(pmd)) { - clear_pud_entry(pud); - range = PUD_SIZE; - } - } - - addr += range; - } -} - -/** - * kvm_free_stage2_pgd - free all stage-2 tables - * @kvm: The KVM struct pointer for the VM. - * - * Walks the level-1 page table pointed to by kvm->arch.pgd and frees all - * underlying level-2 and level-3 tables before freeing the actual level-1 table - * and setting the struct pointer to NULL. - * - * Note we don't need locking here as this is only called when the VM is - * destroyed, which can only be done once. - */ -void kvm_free_stage2_pgd(struct kvm *kvm) -{ - if (kvm->arch.pgd == NULL) - return; - - unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); - free_pages((unsigned long)kvm->arch.pgd, S2_PGD_ORDER); - kvm->arch.pgd = NULL; -} - - -static int stage2_set_pte(struct kvm *kvm, struct kvm_mmu_memory_cache *cache, - phys_addr_t addr, const pte_t *new_pte, bool iomap) -{ - pgd_t *pgd; - pud_t *pud; - pmd_t *pmd; - pte_t *pte, old_pte; - - /* Create 2nd stage page table mapping - Level 1 */ - pgd = kvm->arch.pgd + pgd_index(addr); - pud = pud_offset(pgd, addr); - if (pud_none(*pud)) { - if (!cache) - return 0; /* ignore calls from kvm_set_spte_hva */ - pmd = mmu_memory_cache_alloc(cache); - pud_populate(NULL, pud, pmd); - pmd += pmd_index(addr); - get_page(virt_to_page(pud)); - } else - pmd = pmd_offset(pud, addr); - - /* Create 2nd stage page table mapping - Level 2 */ - if (pmd_none(*pmd)) { - if (!cache) - return 0; /* ignore calls from kvm_set_spte_hva */ - pte = mmu_memory_cache_alloc(cache); - clean_pte_table(pte); - pmd_populate_kernel(NULL, pmd, pte); - pte += pte_index(addr); - get_page(virt_to_page(pmd)); - } else - pte = pte_offset_kernel(pmd, addr); - - if (iomap && pte_present(*pte)) - return -EFAULT; - - /* Create 2nd stage page table mapping - Level 3 */ - old_pte = *pte; - kvm_set_pte(pte, *new_pte); - if (pte_present(old_pte)) - kvm_tlb_flush_vmid(kvm); - else - get_page(virt_to_page(pte)); - - return 0; -} - -/** - * kvm_phys_addr_ioremap - map a device range to guest IPA - * - * @kvm: The KVM pointer - * @guest_ipa: The IPA at which to insert the mapping - * @pa: The physical address of the device - * @size: The size of the mapping - */ -int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, - phys_addr_t pa, unsigned long size) -{ - phys_addr_t addr, end; - int ret = 0; - unsigned long pfn; - struct kvm_mmu_memory_cache cache = { 0, }; - - end = (guest_ipa + size + PAGE_SIZE - 1) & PAGE_MASK; - pfn = __phys_to_pfn(pa); - - for (addr = guest_ipa; addr < end; addr += PAGE_SIZE) { - pte_t pte = pfn_pte(pfn, PAGE_S2_DEVICE | L_PTE_S2_RDWR); - - ret = mmu_topup_memory_cache(&cache, 2, 2); - if (ret) - goto out; - spin_lock(&kvm->mmu_lock); - ret = stage2_set_pte(kvm, &cache, addr, &pte, true); - spin_unlock(&kvm->mmu_lock); - if (ret) - goto out; - - pfn++; - } - -out: - mmu_free_memory_cache(&cache); - 
return ret; -} - -static void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn) -{ - /* - * If we are going to insert an instruction page and the icache is - * either VIPT or PIPT, there is a potential problem where the host - * (or another VM) may have used the same page as this guest, and we - * read incorrect data from the icache. If we're using a PIPT cache, - * we can invalidate just that page, but if we are using a VIPT cache - * we need to invalidate the entire icache - damn shame - as written - * in the ARM ARM (DDI 0406C.b - Page B3-1393). - * - * VIVT caches are tagged using both the ASID and the VMID and doesn't - * need any kind of flushing (DDI 0406C.b - Page B3-1392). - */ - if (icache_is_pipt()) { - unsigned long hva = gfn_to_hva(kvm, gfn); - __cpuc_coherent_user_range(hva, hva + PAGE_SIZE); - } else if (!icache_is_vivt_asid_tagged()) { - /* any kind of VIPT cache */ - __flush_icache_all(); - } -} - -static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, - gfn_t gfn, struct kvm_memory_slot *memslot, - unsigned long fault_status) -{ - pte_t new_pte; - pfn_t pfn; - int ret; - bool write_fault, writable; - unsigned long mmu_seq; - struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; - - write_fault = kvm_is_write_fault(vcpu->arch.hsr); - if (fault_status == FSC_PERM && !write_fault) { - kvm_err("Unexpected L2 read permission error\n"); - return -EFAULT; - } - - /* We need minimum second+third level pages */ - ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS); - if (ret) - return ret; - - mmu_seq = vcpu->kvm->mmu_notifier_seq; - /* - * Ensure the read of mmu_notifier_seq happens before we call - * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk - * the page we just got a reference to gets unmapped before we have a - * chance to grab the mmu_lock, which ensure that if the page gets - * unmapped afterwards, the call to kvm_unmap_hva will take it away - * from us again properly. This smp_rmb() interacts with the smp_wmb() - * in kvm_mmu_notifier_invalidate_. - */ - smp_rmb(); - - pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable); - if (is_error_pfn(pfn)) - return -EFAULT; - - new_pte = pfn_pte(pfn, PAGE_S2); - coherent_icache_guest_page(vcpu->kvm, gfn); - - spin_lock(&vcpu->kvm->mmu_lock); - if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) - goto out_unlock; - if (writable) { - pte_val(new_pte) |= L_PTE_S2_RDWR; - kvm_set_pfn_dirty(pfn); - } - stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte, false); - -out_unlock: - spin_unlock(&vcpu->kvm->mmu_lock); - kvm_release_pfn_clean(pfn); - return 0; -} - -/** - * kvm_handle_guest_abort - handles all 2nd stage aborts - * @vcpu: the VCPU pointer - * @run: the kvm_run structure - * - * Any abort that gets to the host is almost guaranteed to be caused by a - * missing second stage translation table entry, which can mean that either the - * guest simply needs more memory and we must allocate an appropriate page or it - * can mean that the guest tried to access I/O memory, which is emulated by user - * space. The distinction is based on the IPA causing the fault and whether this - * memory region has been registered as standard RAM by user space. 
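Editor's worked example (values invented, not from the patch): in kvm_handle_guest_abort below, the faulting IPA is rebuilt from HPFAR, which holds bits [39:12] of the IPA in its bits [31:4] -- the same layout the assembly in interrupts.S reconstructs from the PAR when it has to translate the xFAR by hand. Masking and shifting left by 8 therefore restores the page-aligned IPA, and the in-page offset is taken from HxFAR only on the MMIO path:

	HPFAR                     = 0x00012340   ->  IPA[39:12] = 0x1234
	(HPFAR & HPFAR_MASK) << 8 = 0x01234000   (page-aligned fault_ipa)
	HxFAR & ~PAGE_MASK        = 0x0000056c   (offset within the page)
	fault_ipa for MMIO        = 0x0123456c

For RAM faults the offset is irrelevant, since only gfn = fault_ipa >> PAGE_SHIFT is used.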
- */ -int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run) -{ - unsigned long hsr_ec; - unsigned long fault_status; - phys_addr_t fault_ipa; - struct kvm_memory_slot *memslot; - bool is_iabt; - gfn_t gfn; - int ret, idx; - - hsr_ec = vcpu->arch.hsr >> HSR_EC_SHIFT; - is_iabt = (hsr_ec == HSR_EC_IABT); - fault_ipa = ((phys_addr_t)vcpu->arch.hpfar & HPFAR_MASK) << 8; - - trace_kvm_guest_fault(*vcpu_pc(vcpu), vcpu->arch.hsr, - vcpu->arch.hxfar, fault_ipa); - - /* Check the stage-2 fault is trans. fault or write fault */ - fault_status = (vcpu->arch.hsr & HSR_FSC_TYPE); - if (fault_status != FSC_FAULT && fault_status != FSC_PERM) { - kvm_err("Unsupported fault status: EC=%#lx DFCS=%#lx\n", - hsr_ec, fault_status); - return -EFAULT; - } - - idx = srcu_read_lock(&vcpu->kvm->srcu); - - gfn = fault_ipa >> PAGE_SHIFT; - if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) { - if (is_iabt) { - /* Prefetch Abort on I/O address */ - kvm_inject_pabt(vcpu, vcpu->arch.hxfar); - ret = 1; - goto out_unlock; - } - - if (fault_status != FSC_FAULT) { - kvm_err("Unsupported fault status on io memory: %#lx\n", - fault_status); - ret = -EFAULT; - goto out_unlock; - } - - /* Adjust page offset */ - fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK; - ret = io_mem_abort(vcpu, run, fault_ipa); - goto out_unlock; - } - - memslot = gfn_to_memslot(vcpu->kvm, gfn); - if (!memslot->user_alloc) { - kvm_err("non user-alloc memslots not supported\n"); - ret = -EINVAL; - goto out_unlock; - } - - ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status); - if (ret == 0) - ret = 1; -out_unlock: - srcu_read_unlock(&vcpu->kvm->srcu, idx); - return ret; -} - -static void handle_hva_to_gpa(struct kvm *kvm, - unsigned long start, - unsigned long end, - void (*handler)(struct kvm *kvm, - gpa_t gpa, void *data), - void *data) -{ - struct kvm_memslots *slots; - struct kvm_memory_slot *memslot; - - slots = kvm_memslots(kvm); - - /* we only care about the pages that the guest sees */ - kvm_for_each_memslot(memslot, slots) { - unsigned long hva_start, hva_end; - gfn_t gfn, gfn_end; - - hva_start = max(start, memslot->userspace_addr); - hva_end = min(end, memslot->userspace_addr + - (memslot->npages << PAGE_SHIFT)); - if (hva_start >= hva_end) - continue; - - /* - * {gfn(page) | page intersects with [hva_start, hva_end)} = - * {gfn_start, gfn_start+1, ..., gfn_end-1}. 
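Editor's worked example (numbers invented; assumes hva_to_gfn_memslot's usual definition of base_gfn + ((hva - userspace_addr) >> PAGE_SHIFT)): the clamping in handle_hva_to_gpa above, combined with the gfn conversion just below, picks out exactly the guest pages that intersect the host range. With 4 KiB pages, a memslot at userspace_addr 0x40000000 with base_gfn 0x100 and 16 pages, and an unmap request for hva [0x40003800, 0x40004800):

	hva_start = max(0x40003800, 0x40000000)            = 0x40003800
	hva_end   = min(0x40004800, 0x40000000 + 16 pages) = 0x40004800
	gfn       = 0x100 + (0x3800 >> 12)                 = 0x103
	gfn_end   = 0x100 + ((0x4800 + 0xfff) >> 12)       = 0x105

so the handler runs for gfns 0x103 and 0x104, the two guest pages that overlap the request.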
- */ - gfn = hva_to_gfn_memslot(hva_start, memslot); - gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); - - for (; gfn < gfn_end; ++gfn) { - gpa_t gpa = gfn << PAGE_SHIFT; - handler(kvm, gpa, data); - } - } -} - -static void kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, void *data) -{ - unmap_stage2_range(kvm, gpa, PAGE_SIZE); - kvm_tlb_flush_vmid(kvm); -} - -int kvm_unmap_hva(struct kvm *kvm, unsigned long hva) -{ - unsigned long end = hva + PAGE_SIZE; - - if (!kvm->arch.pgd) - return 0; - - trace_kvm_unmap_hva(hva); - handle_hva_to_gpa(kvm, hva, end, &kvm_unmap_hva_handler, NULL); - return 0; -} - -int kvm_unmap_hva_range(struct kvm *kvm, - unsigned long start, unsigned long end) -{ - if (!kvm->arch.pgd) - return 0; - - trace_kvm_unmap_hva_range(start, end); - handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); - return 0; -} - -static void kvm_set_spte_handler(struct kvm *kvm, gpa_t gpa, void *data) -{ - pte_t *pte = (pte_t *)data; - - stage2_set_pte(kvm, NULL, gpa, pte, false); -} - - -void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) -{ - unsigned long end = hva + PAGE_SIZE; - pte_t stage2_pte; - - if (!kvm->arch.pgd) - return; - - trace_kvm_set_spte_hva(hva); - stage2_pte = pfn_pte(pte_pfn(pte), PAGE_S2); - handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &stage2_pte); -} - -void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) -{ - mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); -} - -phys_addr_t kvm_mmu_get_httbr(void) -{ - VM_BUG_ON(!virt_addr_valid(hyp_pgd)); - return virt_to_phys(hyp_pgd); -} - -int kvm_mmu_init(void) -{ - if (!hyp_pgd) { - kvm_err("Hyp mode PGD not allocated\n"); - return -ENOMEM; - } - - return 0; -} - -/** - * kvm_clear_idmap - remove all idmaps from the hyp pgd - * - * Free the underlying pmds for all pgds in range and clear the pgds (but - * don't free them) afterwards. - */ -void kvm_clear_hyp_idmap(void) -{ - unsigned long addr, end; - unsigned long next; - pgd_t *pgd = hyp_pgd; - pud_t *pud; - pmd_t *pmd; - - addr = virt_to_phys(__hyp_idmap_text_start); - end = virt_to_phys(__hyp_idmap_text_end); - - pgd += pgd_index(addr); - do { - next = pgd_addr_end(addr, end); - if (pgd_none_or_clear_bad(pgd)) - continue; - pud = pud_offset(pgd, addr); - pmd = pmd_offset(pud, addr); - - pud_clear(pud); - clean_pmd_entry(pmd); - pmd_free(NULL, (pmd_t *)((unsigned long)pmd & PAGE_MASK)); - } while (pgd++, addr = next, addr < end); -} diff --git a/trunk/arch/arm/kvm/psci.c b/trunk/arch/arm/kvm/psci.c deleted file mode 100644 index 7ee5bb7a3667..000000000000 --- a/trunk/arch/arm/kvm/psci.c +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Copyright (C) 2012 - ARM Ltd - * Author: Marc Zyngier - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ - -#include -#include - -#include -#include - -/* - * This is an implementation of the Power State Coordination Interface - * as described in ARM document number ARM DEN 0022A. 
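Editor's illustration (guest-side sketch, not part of the patch): the handlers that follow expect the SMC-style calling convention the comment above refers to -- function number in r0, first argument in r1, second in r2, result returned in r0. A CPU_ON request from a guest would therefore look roughly like this (register constraints are illustrative, and the hvc encoding assumes the virtualization extensions are available to the assembler):

	static long guest_psci_cpu_on(unsigned long target_cpu, unsigned long entry_point)
	{
		register unsigned long r0 asm("r0") = KVM_PSCI_FN_CPU_ON;
		register unsigned long r1 asm("r1") = target_cpu;
		register unsigned long r2 asm("r2") = entry_point;

		asm volatile("hvc #0"
			     : "+r" (r0)
			     : "r" (r1), "r" (r2)
			     : "memory");

		return r0;	/* KVM_PSCI_RET_SUCCESS or KVM_PSCI_RET_INVAL */
	}

kvm_psci_vcpu_on below reads the target CPU from r1 and the entry point from r2, and kvm_psci_call writes the result back into r0 before the guest resumes.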
- */ - -static void kvm_psci_vcpu_off(struct kvm_vcpu *vcpu) -{ - vcpu->arch.pause = true; -} - -static unsigned long kvm_psci_vcpu_on(struct kvm_vcpu *source_vcpu) -{ - struct kvm *kvm = source_vcpu->kvm; - struct kvm_vcpu *vcpu; - wait_queue_head_t *wq; - unsigned long cpu_id; - phys_addr_t target_pc; - - cpu_id = *vcpu_reg(source_vcpu, 1); - if (vcpu_mode_is_32bit(source_vcpu)) - cpu_id &= ~((u32) 0); - - if (cpu_id >= atomic_read(&kvm->online_vcpus)) - return KVM_PSCI_RET_INVAL; - - target_pc = *vcpu_reg(source_vcpu, 2); - - vcpu = kvm_get_vcpu(kvm, cpu_id); - - wq = kvm_arch_vcpu_wq(vcpu); - if (!waitqueue_active(wq)) - return KVM_PSCI_RET_INVAL; - - kvm_reset_vcpu(vcpu); - - /* Gracefully handle Thumb2 entry point */ - if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) { - target_pc &= ~((phys_addr_t) 1); - vcpu_set_thumb(vcpu); - } - - *vcpu_pc(vcpu) = target_pc; - vcpu->arch.pause = false; - smp_mb(); /* Make sure the above is visible */ - - wake_up_interruptible(wq); - - return KVM_PSCI_RET_SUCCESS; -} - -/** - * kvm_psci_call - handle PSCI call if r0 value is in range - * @vcpu: Pointer to the VCPU struct - * - * Handle PSCI calls from guests through traps from HVC or SMC instructions. - * The calling convention is similar to SMC calls to the secure world where - * the function number is placed in r0 and this function returns true if the - * function number specified in r0 is withing the PSCI range, and false - * otherwise. - */ -bool kvm_psci_call(struct kvm_vcpu *vcpu) -{ - unsigned long psci_fn = *vcpu_reg(vcpu, 0) & ~((u32) 0); - unsigned long val; - - switch (psci_fn) { - case KVM_PSCI_FN_CPU_OFF: - kvm_psci_vcpu_off(vcpu); - val = KVM_PSCI_RET_SUCCESS; - break; - case KVM_PSCI_FN_CPU_ON: - val = kvm_psci_vcpu_on(vcpu); - break; - case KVM_PSCI_FN_CPU_SUSPEND: - case KVM_PSCI_FN_MIGRATE: - val = KVM_PSCI_RET_NI; - break; - - default: - return false; - } - - *vcpu_reg(vcpu, 0) = val; - return true; -} diff --git a/trunk/arch/arm/kvm/reset.c b/trunk/arch/arm/kvm/reset.c deleted file mode 100644 index b80256b554cd..000000000000 --- a/trunk/arch/arm/kvm/reset.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Copyright (C) 2012 - Virtual Open Systems and Columbia University - * Author: Christoffer Dall - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License, version 2, as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
- */ -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -/****************************************************************************** - * Cortex-A15 Reset Values - */ - -static const int a15_max_cpu_idx = 3; - -static struct kvm_regs a15_regs_reset = { - .usr_regs.ARM_cpsr = SVC_MODE | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT, -}; - - -/******************************************************************************* - * Exported reset function - */ - -/** - * kvm_reset_vcpu - sets core registers and cp15 registers to reset value - * @vcpu: The VCPU pointer - * - * This function finds the right table above and sets the registers on the - * virtual CPU struct to their architectually defined reset values. - */ -int kvm_reset_vcpu(struct kvm_vcpu *vcpu) -{ - struct kvm_regs *cpu_reset; - - switch (vcpu->arch.target) { - case KVM_ARM_TARGET_CORTEX_A15: - if (vcpu->vcpu_id > a15_max_cpu_idx) - return -EINVAL; - cpu_reset = &a15_regs_reset; - vcpu->arch.midr = read_cpuid_id(); - break; - default: - return -ENODEV; - } - - /* Reset core registers */ - memcpy(&vcpu->arch.regs, cpu_reset, sizeof(vcpu->arch.regs)); - - /* Reset CP15 registers */ - kvm_reset_coprocs(vcpu); - - return 0; -} diff --git a/trunk/arch/arm/kvm/trace.h b/trunk/arch/arm/kvm/trace.h deleted file mode 100644 index a8e73ed5ad5b..000000000000 --- a/trunk/arch/arm/kvm/trace.h +++ /dev/null @@ -1,235 +0,0 @@ -#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) -#define _TRACE_KVM_H - -#include - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM kvm - -/* - * Tracepoints for entry/exit to guest - */ -TRACE_EVENT(kvm_entry, - TP_PROTO(unsigned long vcpu_pc), - TP_ARGS(vcpu_pc), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - ), - - TP_printk("PC: 0x%08lx", __entry->vcpu_pc) -); - -TRACE_EVENT(kvm_exit, - TP_PROTO(unsigned long vcpu_pc), - TP_ARGS(vcpu_pc), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - ), - - TP_printk("PC: 0x%08lx", __entry->vcpu_pc) -); - -TRACE_EVENT(kvm_guest_fault, - TP_PROTO(unsigned long vcpu_pc, unsigned long hsr, - unsigned long hxfar, - unsigned long long ipa), - TP_ARGS(vcpu_pc, hsr, hxfar, ipa), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - __field( unsigned long, hsr ) - __field( unsigned long, hxfar ) - __field( unsigned long long, ipa ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - __entry->hsr = hsr; - __entry->hxfar = hxfar; - __entry->ipa = ipa; - ), - - TP_printk("guest fault at PC %#08lx (hxfar %#08lx, " - "ipa %#16llx, hsr %#08lx", - __entry->vcpu_pc, __entry->hxfar, - __entry->ipa, __entry->hsr) -); - -TRACE_EVENT(kvm_irq_line, - TP_PROTO(unsigned int type, int vcpu_idx, int irq_num, int level), - TP_ARGS(type, vcpu_idx, irq_num, level), - - TP_STRUCT__entry( - __field( unsigned int, type ) - __field( int, vcpu_idx ) - __field( int, irq_num ) - __field( int, level ) - ), - - TP_fast_assign( - __entry->type = type; - __entry->vcpu_idx = vcpu_idx; - __entry->irq_num = irq_num; - __entry->level = level; - ), - - TP_printk("Inject %s interrupt (%d), vcpu->idx: %d, num: %d, level: %d", - (__entry->type == KVM_ARM_IRQ_TYPE_CPU) ? "CPU" : - (__entry->type == KVM_ARM_IRQ_TYPE_PPI) ? "VGIC PPI" : - (__entry->type == KVM_ARM_IRQ_TYPE_SPI) ? 
"VGIC SPI" : "UNKNOWN", - __entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level) -); - -TRACE_EVENT(kvm_mmio_emulate, - TP_PROTO(unsigned long vcpu_pc, unsigned long instr, - unsigned long cpsr), - TP_ARGS(vcpu_pc, instr, cpsr), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - __field( unsigned long, instr ) - __field( unsigned long, cpsr ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - __entry->instr = instr; - __entry->cpsr = cpsr; - ), - - TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)", - __entry->vcpu_pc, __entry->instr, __entry->cpsr) -); - -/* Architecturally implementation defined CP15 register access */ -TRACE_EVENT(kvm_emulate_cp15_imp, - TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn, - unsigned long CRm, unsigned long Op2, bool is_write), - TP_ARGS(Op1, Rt1, CRn, CRm, Op2, is_write), - - TP_STRUCT__entry( - __field( unsigned int, Op1 ) - __field( unsigned int, Rt1 ) - __field( unsigned int, CRn ) - __field( unsigned int, CRm ) - __field( unsigned int, Op2 ) - __field( bool, is_write ) - ), - - TP_fast_assign( - __entry->is_write = is_write; - __entry->Op1 = Op1; - __entry->Rt1 = Rt1; - __entry->CRn = CRn; - __entry->CRm = CRm; - __entry->Op2 = Op2; - ), - - TP_printk("Implementation defined CP15: %s\tp15, %u, r%u, c%u, c%u, %u", - (__entry->is_write) ? "mcr" : "mrc", - __entry->Op1, __entry->Rt1, __entry->CRn, - __entry->CRm, __entry->Op2) -); - -TRACE_EVENT(kvm_wfi, - TP_PROTO(unsigned long vcpu_pc), - TP_ARGS(vcpu_pc), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - ), - - TP_printk("guest executed wfi at: 0x%08lx", __entry->vcpu_pc) -); - -TRACE_EVENT(kvm_unmap_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier unmap hva: %#08lx", __entry->hva) -); - -TRACE_EVENT(kvm_unmap_hva_range, - TP_PROTO(unsigned long start, unsigned long end), - TP_ARGS(start, end), - - TP_STRUCT__entry( - __field( unsigned long, start ) - __field( unsigned long, end ) - ), - - TP_fast_assign( - __entry->start = start; - __entry->end = end; - ), - - TP_printk("mmu notifier unmap range: %#08lx -- %#08lx", - __entry->start, __entry->end) -); - -TRACE_EVENT(kvm_set_spte_hva, - TP_PROTO(unsigned long hva), - TP_ARGS(hva), - - TP_STRUCT__entry( - __field( unsigned long, hva ) - ), - - TP_fast_assign( - __entry->hva = hva; - ), - - TP_printk("mmu notifier set pte hva: %#08lx", __entry->hva) -); - -TRACE_EVENT(kvm_hvc, - TP_PROTO(unsigned long vcpu_pc, unsigned long r0, unsigned long imm), - TP_ARGS(vcpu_pc, r0, imm), - - TP_STRUCT__entry( - __field( unsigned long, vcpu_pc ) - __field( unsigned long, r0 ) - __field( unsigned long, imm ) - ), - - TP_fast_assign( - __entry->vcpu_pc = vcpu_pc; - __entry->r0 = r0; - __entry->imm = imm; - ), - - TP_printk("HVC at 0x%08lx (r0: 0x%08lx, imm: 0x%lx", - __entry->vcpu_pc, __entry->r0, __entry->imm) -); - -#endif /* _TRACE_KVM_H */ - -#undef TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH arch/arm/kvm -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE trace - -/* This part must be outside protection */ -#include diff --git a/trunk/arch/arm/mach-at91/at91rm9200_time.c b/trunk/arch/arm/mach-at91/at91rm9200_time.c index 2acdff4c1dfe..cafe98836c8a 100644 --- a/trunk/arch/arm/mach-at91/at91rm9200_time.c +++ b/trunk/arch/arm/mach-at91/at91rm9200_time.c @@ -174,6 +174,7 @@ clkevt32k_next_event(unsigned 
long delta, struct clock_event_device *dev) static struct clock_event_device clkevt = { .name = "at91_tick", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, + .shift = 32, .rating = 150, .set_next_event = clkevt32k_next_event, .set_mode = clkevt32k_mode, @@ -264,10 +265,17 @@ void __init at91rm9200_timer_init(void) at91_st_write(AT91_ST_RTMR, 1); /* Setup timer clockevent, with minimum of two ticks (important!!) */ + clkevt.mult = div_sc(AT91_SLOW_CLOCK, NSEC_PER_SEC, clkevt.shift); + clkevt.max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt); + clkevt.min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1; clkevt.cpumask = cpumask_of(0); - clockevents_config_and_register(&clkevt, AT91_SLOW_CLOCK, - 2, AT91_ST_ALMV); + clockevents_register_device(&clkevt); /* register clocksource */ clocksource_register_hz(&clk32k, AT91_SLOW_CLOCK); } + +struct sys_timer at91rm9200_timer = { + .init = at91rm9200_timer_init, +}; + diff --git a/trunk/arch/arm/mach-at91/at91sam926x_time.c b/trunk/arch/arm/mach-at91/at91sam926x_time.c index 3a4bc2e1a65e..358412f1f5f8 100644 --- a/trunk/arch/arm/mach-at91/at91sam926x_time.c +++ b/trunk/arch/arm/mach-at91/at91sam926x_time.c @@ -104,38 +104,12 @@ pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) } } -static void at91sam926x_pit_suspend(struct clock_event_device *cedev) -{ - /* Disable timer */ - pit_write(AT91_PIT_MR, 0); -} - -static void at91sam926x_pit_reset(void) -{ - /* Disable timer and irqs */ - pit_write(AT91_PIT_MR, 0); - - /* Clear any pending interrupts, wait for PIT to stop counting */ - while (PIT_CPIV(pit_read(AT91_PIT_PIVR)) != 0) - cpu_relax(); - - /* Start PIT but don't enable IRQ */ - pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); -} - -static void at91sam926x_pit_resume(struct clock_event_device *cedev) -{ - at91sam926x_pit_reset(); -} - static struct clock_event_device pit_clkevt = { .name = "pit", .features = CLOCK_EVT_FEAT_PERIODIC, .shift = 32, .rating = 100, .set_mode = pit_clkevt_mode, - .suspend = at91sam926x_pit_suspend, - .resume = at91sam926x_pit_resume, }; @@ -176,6 +150,19 @@ static struct irqaction at91sam926x_pit_irq = { .irq = NR_IRQS_LEGACY + AT91_ID_SYS, }; +static void at91sam926x_pit_reset(void) +{ + /* Disable timer and irqs */ + pit_write(AT91_PIT_MR, 0); + + /* Clear any pending interrupts, wait for PIT to stop counting */ + while (PIT_CPIV(pit_read(AT91_PIT_PIVR)) != 0) + cpu_relax(); + + /* Start PIT but don't enable IRQ */ + pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); +} + #ifdef CONFIG_OF static struct of_device_id pit_timer_ids[] = { { .compatible = "atmel,at91sam9260-pit" }, @@ -224,7 +211,7 @@ static int __init of_at91sam926x_pit_init(void) /* * Set up both clocksource and clockevent support. 
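Editor's note (worked numbers, assuming the usual 32.768 kHz AT91 slow clock and a 20-bit AT91_ST_ALMV alarm field): the hand-rolled conversion being restored above reproduces the values clockevents_config_and_register() previously derived internally from (freq, min_delta, max_delta):

	mult         = div_sc(32768, NSEC_PER_SEC, 32) = (32768 << 32) / 10^9 ~= 140737
	min_delta_ns = clockevent_delta2ns(2, &clkevt) + 1 ~= 61 us   (two slow-clock ticks)
	max_delta_ns = clockevent_delta2ns(AT91_ST_ALMV, &clkevt) ~= 32 s   (alarm full scale)

The board-file hunks that follow are the mechanical half of the same revert: instead of pointing .init_time at the timer init function, each machine record goes back to referencing a struct sys_timer wrapper, e.g.

	/* illustrative machine record, in the form restored by the hunks below */
	MACHINE_START(EXAMPLE, "example board")
		.timer = &at91rm9200_timer,	/* was: .init_time = at91rm9200_timer_init */
		/* ... */
	MACHINE_END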
*/ -void __init at91sam926x_pit_init(void) +static void __init at91sam926x_pit_init(void) { unsigned long pit_rate; unsigned bits; @@ -263,6 +250,12 @@ void __init at91sam926x_pit_init(void) clockevents_register_device(&pit_clkevt); } +static void at91sam926x_pit_suspend(void) +{ + /* Disable timer */ + pit_write(AT91_PIT_MR, 0); +} + void __init at91sam926x_ioremap_pit(u32 addr) { #if defined(CONFIG_OF) @@ -279,3 +272,9 @@ void __init at91sam926x_ioremap_pit(u32 addr) if (!pit_base_addr) panic("Impossible to ioremap PIT\n"); } + +struct sys_timer at91sam926x_timer = { + .init = at91sam926x_pit_init, + .suspend = at91sam926x_pit_suspend, + .resume = at91sam926x_pit_reset, +}; diff --git a/trunk/arch/arm/mach-at91/at91x40_time.c b/trunk/arch/arm/mach-at91/at91x40_time.c index 0c07a4459cb2..0e57e440c061 100644 --- a/trunk/arch/arm/mach-at91/at91x40_time.c +++ b/trunk/arch/arm/mach-at91/at91x40_time.c @@ -42,10 +42,9 @@ #define AT91_TC_CLK1BASE 0x40 #define AT91_TC_CLK2BASE 0x80 -static u32 at91x40_gettimeoffset(void) +static unsigned long at91x40_gettimeoffset(void) { - return (at91_tc_read(AT91_TC_CLK1BASE + AT91_TC_CV) * 1000000 / - (AT91X40_MASTER_CLOCK / 128)) * 1000; + return (at91_tc_read(AT91_TC_CLK1BASE + AT91_TC_CV) * 1000000 / (AT91X40_MASTER_CLOCK / 128)); } static irqreturn_t at91x40_timer_interrupt(int irq, void *dev_id) @@ -65,8 +64,6 @@ void __init at91x40_timer_init(void) { unsigned int v; - arch_gettimeoffset = at91x40_gettimeoffset; - at91_tc_write(AT91_TC_BCR, 0); v = at91_tc_read(AT91_TC_BMR); v = (v & ~AT91_TC_TC1XC1S) | AT91_TC_TC1XC1S_NONE; @@ -82,3 +79,9 @@ void __init at91x40_timer_init(void) at91_tc_write(AT91_TC_CLK1BASE + AT91_TC_CCR, (AT91_TC_SWTRG | AT91_TC_CLKEN)); } + +struct sys_timer at91x40_timer = { + .init = at91x40_timer_init, + .offset = at91x40_gettimeoffset, +}; + diff --git a/trunk/arch/arm/mach-at91/board-1arm.c b/trunk/arch/arm/mach-at91/board-1arm.c index 35ab632bbf68..b99b5752cc10 100644 --- a/trunk/arch/arm/mach-at91/board-1arm.c +++ b/trunk/arch/arm/mach-at91/board-1arm.c @@ -90,7 +90,7 @@ static void __init onearm_board_init(void) MACHINE_START(ONEARM, "Ajeco 1ARM single board computer") /* Maintainer: Lennert Buytenhek */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = onearm_init_early, diff --git a/trunk/arch/arm/mach-at91/board-afeb-9260v1.c b/trunk/arch/arm/mach-at91/board-afeb-9260v1.c index f95e31cda4b3..854b97974287 100644 --- a/trunk/arch/arm/mach-at91/board-afeb-9260v1.c +++ b/trunk/arch/arm/mach-at91/board-afeb-9260v1.c @@ -210,7 +210,7 @@ static void __init afeb9260_board_init(void) MACHINE_START(AFEB9260, "Custom afeb9260 board") /* Maintainer: Sergey Lapin */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = afeb9260_init_early, diff --git a/trunk/arch/arm/mach-at91/board-cam60.c b/trunk/arch/arm/mach-at91/board-cam60.c index ade948b82662..28a18ce6d914 100644 --- a/trunk/arch/arm/mach-at91/board-cam60.c +++ b/trunk/arch/arm/mach-at91/board-cam60.c @@ -187,7 +187,7 @@ static void __init cam60_board_init(void) MACHINE_START(CAM60, "KwikByte CAM60") /* Maintainer: KwikByte */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = cam60_init_early, diff --git a/trunk/arch/arm/mach-at91/board-carmeva.c b/trunk/arch/arm/mach-at91/board-carmeva.c index 
92983050a9bd..c17bb533a949 100644 --- a/trunk/arch/arm/mach-at91/board-carmeva.c +++ b/trunk/arch/arm/mach-at91/board-carmeva.c @@ -157,7 +157,7 @@ static void __init carmeva_board_init(void) MACHINE_START(CARMEVA, "Carmeva") /* Maintainer: Conitec Datasystems */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = carmeva_init_early, diff --git a/trunk/arch/arm/mach-at91/board-cpu9krea.c b/trunk/arch/arm/mach-at91/board-cpu9krea.c index 008527efdbcf..847432441ecc 100644 --- a/trunk/arch/arm/mach-at91/board-cpu9krea.c +++ b/trunk/arch/arm/mach-at91/board-cpu9krea.c @@ -374,7 +374,7 @@ MACHINE_START(CPUAT9260, "Eukrea CPU9260") MACHINE_START(CPUAT9G20, "Eukrea CPU9G20") #endif /* Maintainer: Eric Benard - EUKREA Electromatique */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = cpu9krea_init_early, diff --git a/trunk/arch/arm/mach-at91/board-cpuat91.c b/trunk/arch/arm/mach-at91/board-cpuat91.c index 42f1353a4baf..2a7af7868747 100644 --- a/trunk/arch/arm/mach-at91/board-cpuat91.c +++ b/trunk/arch/arm/mach-at91/board-cpuat91.c @@ -178,7 +178,7 @@ static void __init cpuat91_board_init(void) MACHINE_START(CPUAT91, "Eukrea") /* Maintainer: Eric Benard - EUKREA Electromatique */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = cpuat91_init_early, diff --git a/trunk/arch/arm/mach-at91/board-csb337.c b/trunk/arch/arm/mach-at91/board-csb337.c index e5fde215225b..48a531e05be3 100644 --- a/trunk/arch/arm/mach-at91/board-csb337.c +++ b/trunk/arch/arm/mach-at91/board-csb337.c @@ -251,7 +251,7 @@ static void __init csb337_board_init(void) MACHINE_START(CSB337, "Cogent CSB337") /* Maintainer: Bill Gatliff */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = csb337_init_early, diff --git a/trunk/arch/arm/mach-at91/board-csb637.c b/trunk/arch/arm/mach-at91/board-csb637.c index fdf11061c577..ec0f3abd504b 100644 --- a/trunk/arch/arm/mach-at91/board-csb637.c +++ b/trunk/arch/arm/mach-at91/board-csb637.c @@ -132,7 +132,7 @@ static void __init csb637_board_init(void) MACHINE_START(CSB637, "Cogent CSB637") /* Maintainer: Bill Gatliff */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = csb637_init_early, diff --git a/trunk/arch/arm/mach-at91/board-dt.c b/trunk/arch/arm/mach-at91/board-dt.c index 8db30132abed..881170ce61dd 100644 --- a/trunk/arch/arm/mach-at91/board-dt.c +++ b/trunk/arch/arm/mach-at91/board-dt.c @@ -49,7 +49,7 @@ static const char *at91_dt_board_compat[] __initdata = { DT_MACHINE_START(at91sam_dt, "Atmel AT91SAM (Device Tree)") /* Maintainer: Atmel */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = at91_dt_initialize, diff --git a/trunk/arch/arm/mach-at91/board-eb01.c b/trunk/arch/arm/mach-at91/board-eb01.c index becf0a6a289e..b489388a6f84 100644 --- a/trunk/arch/arm/mach-at91/board-eb01.c +++ b/trunk/arch/arm/mach-at91/board-eb01.c @@ -44,7 +44,7 @@ static void __init at91eb01_init_early(void) MACHINE_START(AT91EB01, "Atmel AT91 EB01") /* Maintainer: Greg Ungerer */ - .init_time = at91x40_timer_init, + .timer = &at91x40_timer, .handle_irq = at91_aic_handle_irq, 
.init_early = at91eb01_init_early, .init_irq = at91eb01_init_irq, diff --git a/trunk/arch/arm/mach-at91/board-eb9200.c b/trunk/arch/arm/mach-at91/board-eb9200.c index f9be8161bbfa..9f5e71c95f05 100644 --- a/trunk/arch/arm/mach-at91/board-eb9200.c +++ b/trunk/arch/arm/mach-at91/board-eb9200.c @@ -116,7 +116,7 @@ static void __init eb9200_board_init(void) } MACHINE_START(ATEB9200, "Embest ATEB9200") - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = eb9200_init_early, diff --git a/trunk/arch/arm/mach-at91/board-ecbat91.c b/trunk/arch/arm/mach-at91/board-ecbat91.c index b2fcd71262ba..ef69e0ebe949 100644 --- a/trunk/arch/arm/mach-at91/board-ecbat91.c +++ b/trunk/arch/arm/mach-at91/board-ecbat91.c @@ -181,7 +181,7 @@ static void __init ecb_at91board_init(void) MACHINE_START(ECBAT91, "emQbit's ECB_AT91") /* Maintainer: emQbit.com */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ecb_at91init_early, diff --git a/trunk/arch/arm/mach-at91/board-eco920.c b/trunk/arch/arm/mach-at91/board-eco920.c index 77de410efc90..50f3d3795c05 100644 --- a/trunk/arch/arm/mach-at91/board-eco920.c +++ b/trunk/arch/arm/mach-at91/board-eco920.c @@ -149,7 +149,7 @@ static void __init eco920_board_init(void) MACHINE_START(ECO920, "eco920") /* Maintainer: Sascha Hauer */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = eco920_init_early, diff --git a/trunk/arch/arm/mach-at91/board-flexibity.c b/trunk/arch/arm/mach-at91/board-flexibity.c index 737c08563628..5d44eba0f20f 100644 --- a/trunk/arch/arm/mach-at91/board-flexibity.c +++ b/trunk/arch/arm/mach-at91/board-flexibity.c @@ -159,7 +159,7 @@ static void __init flexibity_board_init(void) MACHINE_START(FLEXIBITY, "Flexibity Connect") /* Maintainer: Maxim Osipov */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = flexibity_init_early, diff --git a/trunk/arch/arm/mach-at91/board-foxg20.c b/trunk/arch/arm/mach-at91/board-foxg20.c index 2ea7059b840b..191d37c16bab 100644 --- a/trunk/arch/arm/mach-at91/board-foxg20.c +++ b/trunk/arch/arm/mach-at91/board-foxg20.c @@ -261,7 +261,7 @@ static void __init foxg20_board_init(void) MACHINE_START(ACMENETUSFOXG20, "Acme Systems srl FOX Board G20") /* Maintainer: Sergio Tanzilli */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = foxg20_init_early, diff --git a/trunk/arch/arm/mach-at91/board-gsia18s.c b/trunk/arch/arm/mach-at91/board-gsia18s.c index c1d61d247790..23a2fa17ab29 100644 --- a/trunk/arch/arm/mach-at91/board-gsia18s.c +++ b/trunk/arch/arm/mach-at91/board-gsia18s.c @@ -574,7 +574,7 @@ static void __init gsia18s_board_init(void) } MACHINE_START(GSIA18S, "GS_IA18_S") - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = gsia18s_init_early, diff --git a/trunk/arch/arm/mach-at91/board-kafa.c b/trunk/arch/arm/mach-at91/board-kafa.c index 88e2f5d2d16d..9a43d1e1a037 100644 --- a/trunk/arch/arm/mach-at91/board-kafa.c +++ b/trunk/arch/arm/mach-at91/board-kafa.c @@ -103,7 +103,7 @@ static void __init kafa_board_init(void) MACHINE_START(KAFA, "Sperry-Sun KAFA") /* Maintainer: Sergei Sharonov */ - .init_time = 
at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = kafa_init_early, diff --git a/trunk/arch/arm/mach-at91/board-kb9202.c b/trunk/arch/arm/mach-at91/board-kb9202.c index 0c519d9ebffc..f168bec2369f 100644 --- a/trunk/arch/arm/mach-at91/board-kb9202.c +++ b/trunk/arch/arm/mach-at91/board-kb9202.c @@ -149,7 +149,7 @@ static void __init kb9202_board_init(void) MACHINE_START(KB9200, "KB920x") /* Maintainer: KwikByte, Inc. */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = kb9202_init_early, diff --git a/trunk/arch/arm/mach-at91/board-neocore926.c b/trunk/arch/arm/mach-at91/board-neocore926.c index 5b4760fe53de..bc7a1c4a1f6a 100644 --- a/trunk/arch/arm/mach-at91/board-neocore926.c +++ b/trunk/arch/arm/mach-at91/board-neocore926.c @@ -378,7 +378,7 @@ static void __init neocore926_board_init(void) MACHINE_START(NEOCORE926, "ADENEO NEOCORE 926") /* Maintainer: ADENEO */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = neocore926_init_early, diff --git a/trunk/arch/arm/mach-at91/board-pcontrol-g20.c b/trunk/arch/arm/mach-at91/board-pcontrol-g20.c index 65c0d6b5ecba..0299554495dd 100644 --- a/trunk/arch/arm/mach-at91/board-pcontrol-g20.c +++ b/trunk/arch/arm/mach-at91/board-pcontrol-g20.c @@ -217,7 +217,7 @@ static void __init pcontrol_g20_board_init(void) MACHINE_START(PCONTROL_G20, "PControl G20") /* Maintainer: pgsellmann@portner-elektronik.at */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = pcontrol_g20_init_early, diff --git a/trunk/arch/arm/mach-at91/board-picotux200.c b/trunk/arch/arm/mach-at91/board-picotux200.c index ab2b2ec36c14..4938f1cd5e13 100644 --- a/trunk/arch/arm/mach-at91/board-picotux200.c +++ b/trunk/arch/arm/mach-at91/board-picotux200.c @@ -119,7 +119,7 @@ static void __init picotux200_board_init(void) MACHINE_START(PICOTUX2XX, "picotux 200") /* Maintainer: Kleinhenz Elektronik GmbH */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = picotux200_init_early, diff --git a/trunk/arch/arm/mach-at91/board-qil-a9260.c b/trunk/arch/arm/mach-at91/board-qil-a9260.c index aa3bc9b0f150..33b1628467ea 100644 --- a/trunk/arch/arm/mach-at91/board-qil-a9260.c +++ b/trunk/arch/arm/mach-at91/board-qil-a9260.c @@ -257,7 +257,7 @@ static void __init ek_board_init(void) MACHINE_START(QIL_A9260, "CALAO QIL_A9260") /* Maintainer: calao-systems */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-rm9200-dt.c b/trunk/arch/arm/mach-at91/board-rm9200-dt.c index 3fcb6623a33e..5f9ce3da3fde 100644 --- a/trunk/arch/arm/mach-at91/board-rm9200-dt.c +++ b/trunk/arch/arm/mach-at91/board-rm9200-dt.c @@ -47,7 +47,7 @@ static const char *at91rm9200_dt_board_compat[] __initdata = { }; DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200 (Device Tree)") - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = at91rm9200_dt_initialize, diff --git a/trunk/arch/arm/mach-at91/board-rm9200dk.c b/trunk/arch/arm/mach-at91/board-rm9200dk.c index 
690541b18cbc..9e5061bef0d0 100644 --- a/trunk/arch/arm/mach-at91/board-rm9200dk.c +++ b/trunk/arch/arm/mach-at91/board-rm9200dk.c @@ -219,7 +219,7 @@ static void __init dk_board_init(void) MACHINE_START(AT91RM9200DK, "Atmel AT91RM9200-DK") /* Maintainer: SAN People/Atmel */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = dk_init_early, diff --git a/trunk/arch/arm/mach-at91/board-rm9200ek.c b/trunk/arch/arm/mach-at91/board-rm9200ek.c index 8b17dadc1aba..58277dbc718f 100644 --- a/trunk/arch/arm/mach-at91/board-rm9200ek.c +++ b/trunk/arch/arm/mach-at91/board-rm9200ek.c @@ -186,7 +186,7 @@ static void __init ek_board_init(void) MACHINE_START(AT91RM9200EK, "Atmel AT91RM9200-EK") /* Maintainer: SAN People/Atmel */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-rsi-ews.c b/trunk/arch/arm/mach-at91/board-rsi-ews.c index f6d7f1958c7e..2e8b8339a206 100644 --- a/trunk/arch/arm/mach-at91/board-rsi-ews.c +++ b/trunk/arch/arm/mach-at91/board-rsi-ews.c @@ -222,7 +222,7 @@ static void __init rsi_ews_board_init(void) MACHINE_START(RSI_EWS, "RSI EWS") /* Maintainer: Josef Holzmayr */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = rsi_ews_init_early, diff --git a/trunk/arch/arm/mach-at91/board-sam9-l9260.c b/trunk/arch/arm/mach-at91/board-sam9-l9260.c index 43ee4dc43b50..b75fbf6003a1 100644 --- a/trunk/arch/arm/mach-at91/board-sam9-l9260.c +++ b/trunk/arch/arm/mach-at91/board-sam9-l9260.c @@ -218,7 +218,7 @@ static void __init ek_board_init(void) MACHINE_START(SAM9_L9260, "Olimex SAM9-L9260") /* Maintainer: Olimex */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-sam9260ek.c b/trunk/arch/arm/mach-at91/board-sam9260ek.c index 0b153c87521d..f0135cd1d858 100644 --- a/trunk/arch/arm/mach-at91/board-sam9260ek.c +++ b/trunk/arch/arm/mach-at91/board-sam9260ek.c @@ -343,7 +343,7 @@ static void __init ek_board_init(void) MACHINE_START(AT91SAM9260EK, "Atmel AT91SAM9260-EK") /* Maintainer: Atmel */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-sam9261ek.c b/trunk/arch/arm/mach-at91/board-sam9261ek.c index b446645c7727..13ebaa8e4100 100644 --- a/trunk/arch/arm/mach-at91/board-sam9261ek.c +++ b/trunk/arch/arm/mach-at91/board-sam9261ek.c @@ -612,7 +612,7 @@ MACHINE_START(AT91SAM9261EK, "Atmel AT91SAM9261-EK") MACHINE_START(AT91SAM9G10EK, "Atmel AT91SAM9G10-EK") #endif /* Maintainer: Atmel */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-sam9263ek.c b/trunk/arch/arm/mach-at91/board-sam9263ek.c index 3284df05df14..89b9608742a7 100644 --- a/trunk/arch/arm/mach-at91/board-sam9263ek.c +++ b/trunk/arch/arm/mach-at91/board-sam9263ek.c @@ -443,7 +443,7 @@ static void __init ek_board_init(void) MACHINE_START(AT91SAM9263EK, "Atmel AT91SAM9263-EK") /* Maintainer: Atmel */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = 
at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-sam9g20ek.c b/trunk/arch/arm/mach-at91/board-sam9g20ek.c index f9cd1f2c7146..1b7dd9f688d3 100644 --- a/trunk/arch/arm/mach-at91/board-sam9g20ek.c +++ b/trunk/arch/arm/mach-at91/board-sam9g20ek.c @@ -409,7 +409,7 @@ static void __init ek_board_init(void) MACHINE_START(AT91SAM9G20EK, "Atmel AT91SAM9G20-EK") /* Maintainer: Atmel */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, @@ -419,7 +419,7 @@ MACHINE_END MACHINE_START(AT91SAM9G20EK_2MMC, "Atmel AT91SAM9G20-EK 2 MMC Slot Mod") /* Maintainer: Atmel */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-sam9m10g45ek.c b/trunk/arch/arm/mach-at91/board-sam9m10g45ek.c index 2a94896a1375..e4cc375e3a32 100644 --- a/trunk/arch/arm/mach-at91/board-sam9m10g45ek.c +++ b/trunk/arch/arm/mach-at91/board-sam9m10g45ek.c @@ -502,7 +502,7 @@ static void __init ek_board_init(void) MACHINE_START(AT91SAM9M10G45EK, "Atmel AT91SAM9M10G45-EK") /* Maintainer: Atmel */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-sam9rlek.c b/trunk/arch/arm/mach-at91/board-sam9rlek.c index aa265dcf2128..377a1097afa7 100644 --- a/trunk/arch/arm/mach-at91/board-sam9rlek.c +++ b/trunk/arch/arm/mach-at91/board-sam9rlek.c @@ -320,7 +320,7 @@ static void __init ek_board_init(void) MACHINE_START(AT91SAM9RLEK, "Atmel AT91SAM9RL-EK") /* Maintainer: Atmel */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-snapper9260.c b/trunk/arch/arm/mach-at91/board-snapper9260.c index 3aaa9784cf0e..98771500ddb9 100644 --- a/trunk/arch/arm/mach-at91/board-snapper9260.c +++ b/trunk/arch/arm/mach-at91/board-snapper9260.c @@ -177,7 +177,7 @@ static void __init snapper9260_board_init(void) } MACHINE_START(SNAPPER_9260, "Bluewater Systems Snapper 9260/9G20 module") - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = snapper9260_init_early, diff --git a/trunk/arch/arm/mach-at91/board-stamp9g20.c b/trunk/arch/arm/mach-at91/board-stamp9g20.c index a033b8df9fb2..48a962b61fa3 100644 --- a/trunk/arch/arm/mach-at91/board-stamp9g20.c +++ b/trunk/arch/arm/mach-at91/board-stamp9g20.c @@ -272,7 +272,7 @@ static void __init stamp9g20evb_board_init(void) MACHINE_START(PORTUXG20, "taskit PortuxG20") /* Maintainer: taskit GmbH */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = stamp9g20_init_early, @@ -282,7 +282,7 @@ MACHINE_END MACHINE_START(STAMP9G20, "taskit Stamp9G20") /* Maintainer: taskit GmbH */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = stamp9g20_init_early, diff --git a/trunk/arch/arm/mach-at91/board-usb-a926x.c b/trunk/arch/arm/mach-at91/board-usb-a926x.c index 2487d944a1bc..c1060f96e589 100644 --- a/trunk/arch/arm/mach-at91/board-usb-a926x.c +++ 
b/trunk/arch/arm/mach-at91/board-usb-a926x.c @@ -355,7 +355,7 @@ static void __init ek_board_init(void) MACHINE_START(USB_A9263, "CALAO USB_A9263") /* Maintainer: calao-systems */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, @@ -365,7 +365,7 @@ MACHINE_END MACHINE_START(USB_A9260, "CALAO USB_A9260") /* Maintainer: calao-systems */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, @@ -375,7 +375,7 @@ MACHINE_END MACHINE_START(USB_A9G20, "CALAO USB_A92G0") /* Maintainer: Jean-Christophe PLAGNIOL-VILLARD */ - .init_time = at91sam926x_pit_init, + .timer = &at91sam926x_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = ek_init_early, diff --git a/trunk/arch/arm/mach-at91/board-yl-9200.c b/trunk/arch/arm/mach-at91/board-yl-9200.c index be083771df2e..8673aebcb85d 100644 --- a/trunk/arch/arm/mach-at91/board-yl-9200.c +++ b/trunk/arch/arm/mach-at91/board-yl-9200.c @@ -587,7 +587,7 @@ static void __init yl9200_board_init(void) MACHINE_START(YL9200, "uCdragon YL-9200") /* Maintainer: S.Birtles */ - .init_time = at91rm9200_timer_init, + .timer = &at91rm9200_timer, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = yl9200_init_early, diff --git a/trunk/arch/arm/mach-at91/generic.h b/trunk/arch/arm/mach-at91/generic.h index 78ab06548658..fc593d615e7d 100644 --- a/trunk/arch/arm/mach-at91/generic.h +++ b/trunk/arch/arm/mach-at91/generic.h @@ -36,11 +36,12 @@ extern int __init at91_aic5_of_init(struct device_node *node, /* Timer */ +struct sys_timer; extern void at91rm9200_ioremap_st(u32 addr); -extern void at91rm9200_timer_init(void); +extern struct sys_timer at91rm9200_timer; extern void at91sam926x_ioremap_pit(u32 addr); -extern void at91sam926x_pit_init(void); -extern void at91x40_timer_init(void); +extern struct sys_timer at91sam926x_timer; +extern struct sys_timer at91x40_timer; /* Clocks */ #ifdef CONFIG_AT91_PMC_UNIT diff --git a/trunk/arch/arm/mach-bcm/board_bcm.c b/trunk/arch/arm/mach-bcm/board_bcm.c index f0f9abafad29..3a62f1b1cabc 100644 --- a/trunk/arch/arm/mach-bcm/board_bcm.c +++ b/trunk/arch/arm/mach-bcm/board_bcm.c @@ -11,19 +11,34 @@ * GNU General Public License for more details. 
*/ +#include #include #include #include #include -#include #include +#include + #include +static const struct of_device_id irq_match[] = { + {.compatible = "arm,cortex-a9-gic", .data = gic_of_init, }, + {} +}; + static void timer_init(void) { } +static struct sys_timer timer = { + .init = timer_init, +}; + +static void __init init_irq(void) +{ + of_irq_init(irq_match); +} static void __init board_init(void) { @@ -34,8 +49,9 @@ static void __init board_init(void) static const char * const bcm11351_dt_compat[] = { "bcm,bcm11351", NULL, }; DT_MACHINE_START(BCM11351_DT, "Broadcom Application Processor") - .init_irq = irqchip_init, - .init_time = timer_init, + .init_irq = init_irq, + .timer = &timer, .init_machine = board_init, .dt_compat = bcm11351_dt_compat, + .handle_irq = gic_handle_irq, MACHINE_END diff --git a/trunk/arch/arm/mach-bcm2835/bcm2835.c b/trunk/arch/arm/mach-bcm2835/bcm2835.c index 176d2d24782d..f0d739f4b7a3 100644 --- a/trunk/arch/arm/mach-bcm2835/bcm2835.c +++ b/trunk/arch/arm/mach-bcm2835/bcm2835.c @@ -104,7 +104,7 @@ DT_MACHINE_START(BCM2835, "BCM2835") .init_irq = bcm2835_init_irq, .handle_irq = bcm2835_handle_irq, .init_machine = bcm2835_init, - .init_time = bcm2835_timer_init, + .timer = &bcm2835_timer, .restart = bcm2835_restart, .dt_compat = bcm2835_compat MACHINE_END diff --git a/trunk/arch/arm/mach-clps711x/board-autcpu12.c b/trunk/arch/arm/mach-clps711x/board-autcpu12.c index f38584709df7..3fbf43f72589 100644 --- a/trunk/arch/arm/mach-clps711x/board-autcpu12.c +++ b/trunk/arch/arm/mach-clps711x/board-autcpu12.c @@ -170,7 +170,7 @@ MACHINE_START(AUTCPU12, "autronix autcpu12") .nr_irqs = CLPS711X_NR_IRQS, .map_io = clps711x_map_io, .init_irq = clps711x_init_irq, - .init_time = clps711x_timer_init, + .timer = &clps711x_timer, .init_machine = autcpu12_init, .init_late = autcpu12_init_late, .handle_irq = clps711x_handle_irq, diff --git a/trunk/arch/arm/mach-clps711x/board-cdb89712.c b/trunk/arch/arm/mach-clps711x/board-cdb89712.c index baab7da33c9b..60900ddf97c9 100644 --- a/trunk/arch/arm/mach-clps711x/board-cdb89712.c +++ b/trunk/arch/arm/mach-clps711x/board-cdb89712.c @@ -140,7 +140,7 @@ MACHINE_START(CDB89712, "Cirrus-CDB89712") .nr_irqs = CLPS711X_NR_IRQS, .map_io = clps711x_map_io, .init_irq = clps711x_init_irq, - .init_time = clps711x_timer_init, + .timer = &clps711x_timer, .init_machine = cdb89712_init, .handle_irq = clps711x_handle_irq, .restart = clps711x_restart, diff --git a/trunk/arch/arm/mach-clps711x/board-clep7312.c b/trunk/arch/arm/mach-clps711x/board-clep7312.c index 014aa3c19a03..0b32a487183b 100644 --- a/trunk/arch/arm/mach-clps711x/board-clep7312.c +++ b/trunk/arch/arm/mach-clps711x/board-clep7312.c @@ -40,7 +40,7 @@ MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312") .fixup = fixup_clep7312, .map_io = clps711x_map_io, .init_irq = clps711x_init_irq, - .init_time = clps711x_timer_init, + .timer = &clps711x_timer, .handle_irq = clps711x_handle_irq, .restart = clps711x_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-clps711x/board-edb7211.c b/trunk/arch/arm/mach-clps711x/board-edb7211.c index 5f928e9ed2ef..71aa5cf2c0d3 100644 --- a/trunk/arch/arm/mach-clps711x/board-edb7211.c +++ b/trunk/arch/arm/mach-clps711x/board-edb7211.c @@ -173,7 +173,7 @@ MACHINE_START(EDB7211, "CL-EDB7211 (EP7211 eval board)") .reserve = edb7211_reserve, .map_io = edb7211_map_io, .init_irq = clps711x_init_irq, - .init_time = clps711x_timer_init, + .timer = &clps711x_timer, .init_machine = edb7211_init, .handle_irq = clps711x_handle_irq, .restart = clps711x_restart, diff 
--git a/trunk/arch/arm/mach-clps711x/board-fortunet.c b/trunk/arch/arm/mach-clps711x/board-fortunet.c index c5675efc8c6a..7d0125580366 100644 --- a/trunk/arch/arm/mach-clps711x/board-fortunet.c +++ b/trunk/arch/arm/mach-clps711x/board-fortunet.c @@ -78,7 +78,7 @@ MACHINE_START(FORTUNET, "ARM-FortuNet") .fixup = fortunet_fixup, .map_io = clps711x_map_io, .init_irq = clps711x_init_irq, - .init_time = clps711x_timer_init, + .timer = &clps711x_timer, .handle_irq = clps711x_handle_irq, .restart = clps711x_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-clps711x/board-p720t.c b/trunk/arch/arm/mach-clps711x/board-p720t.c index 8d3ee6771135..1518fc83babd 100644 --- a/trunk/arch/arm/mach-clps711x/board-p720t.c +++ b/trunk/arch/arm/mach-clps711x/board-p720t.c @@ -224,7 +224,7 @@ MACHINE_START(P720T, "ARM-Prospector720T") .map_io = p720t_map_io, .init_early = p720t_init_early, .init_irq = clps711x_init_irq, - .init_time = clps711x_timer_init, + .timer = &clps711x_timer, .init_machine = p720t_init, .init_late = p720t_init_late, .handle_irq = clps711x_handle_irq, diff --git a/trunk/arch/arm/mach-clps711x/common.c b/trunk/arch/arm/mach-clps711x/common.c index 20ff50f3ccf0..e046439573ee 100644 --- a/trunk/arch/arm/mach-clps711x/common.c +++ b/trunk/arch/arm/mach-clps711x/common.c @@ -282,7 +282,7 @@ static void add_fixed_clk(struct clk *clk, const char *name, int rate) clk_register_clkdev(clk, name, NULL); } -void __init clps711x_timer_init(void) +static void __init clps711x_timer_init(void) { int osc, ext, pll, cpu, bus, timl, timh, uart, spi; u32 tmp; @@ -345,6 +345,10 @@ void __init clps711x_timer_init(void) setup_irq(IRQ_TC2OI, &clps711x_timer_irq); } +struct sys_timer clps711x_timer = { + .init = clps711x_timer_init, +}; + void clps711x_restart(char mode, const char *cmd) { soft_restart(0); diff --git a/trunk/arch/arm/mach-clps711x/common.h b/trunk/arch/arm/mach-clps711x/common.h index f84a7292c70e..b7c0c75c90c0 100644 --- a/trunk/arch/arm/mach-clps711x/common.h +++ b/trunk/arch/arm/mach-clps711x/common.h @@ -8,8 +8,10 @@ #define CLPS711X_NR_GPIO (4 * 8 + 3) #define CLPS711X_GPIO(prt, bit) ((prt) * 8 + (bit)) +struct sys_timer; + extern void clps711x_map_io(void); extern void clps711x_init_irq(void); -extern void clps711x_timer_init(void); extern void clps711x_handle_irq(struct pt_regs *regs); extern void clps711x_restart(char mode, const char *cmd); +extern struct sys_timer clps711x_timer; diff --git a/trunk/arch/arm/mach-cns3xxx/cns3420vb.c b/trunk/arch/arm/mach-cns3xxx/cns3420vb.c index a71867e1d8d6..ae305397003c 100644 --- a/trunk/arch/arm/mach-cns3xxx/cns3420vb.c +++ b/trunk/arch/arm/mach-cns3xxx/cns3420vb.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -249,7 +250,8 @@ MACHINE_START(CNS3420VB, "Cavium Networks CNS3420 Validation Board") .atag_offset = 0x100, .map_io = cns3420_map_io, .init_irq = cns3xxx_init_irq, - .init_time = cns3xxx_timer_init, + .timer = &cns3xxx_timer, + .handle_irq = gic_handle_irq, .init_machine = cns3420_init, .restart = cns3xxx_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-cns3xxx/core.c b/trunk/arch/arm/mach-cns3xxx/core.c index e698f26cc0cb..031805b1428d 100644 --- a/trunk/arch/arm/mach-cns3xxx/core.c +++ b/trunk/arch/arm/mach-cns3xxx/core.c @@ -12,10 +12,10 @@ #include #include #include -#include #include #include #include +#include #include #include #include "core.h" @@ -134,6 +134,7 @@ static int cns3xxx_timer_set_next_event(unsigned long evt, static struct clock_event_device cns3xxx_tmr1_clockevent = { .name = 
"cns3xxx timer1", + .shift = 8, .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .set_mode = cns3xxx_timer_set_mode, .set_next_event = cns3xxx_timer_set_next_event, @@ -144,9 +145,15 @@ static struct clock_event_device cns3xxx_tmr1_clockevent = { static void __init cns3xxx_clockevents_init(unsigned int timer_irq) { cns3xxx_tmr1_clockevent.irq = timer_irq; - clockevents_config_and_register(&cns3xxx_tmr1_clockevent, - (cns3xxx_cpu_clock() >> 3) * 1000000, - 0xf, 0xffffffff); + cns3xxx_tmr1_clockevent.mult = + div_sc((cns3xxx_cpu_clock() >> 3) * 1000000, NSEC_PER_SEC, + cns3xxx_tmr1_clockevent.shift); + cns3xxx_tmr1_clockevent.max_delta_ns = + clockevent_delta2ns(0xffffffff, &cns3xxx_tmr1_clockevent); + cns3xxx_tmr1_clockevent.min_delta_ns = + clockevent_delta2ns(0xf, &cns3xxx_tmr1_clockevent); + + clockevents_register_device(&cns3xxx_tmr1_clockevent); } /* @@ -228,13 +235,17 @@ static void __init __cns3xxx_timer_init(unsigned int timer_irq) cns3xxx_clockevents_init(timer_irq); } -void __init cns3xxx_timer_init(void) +static void __init cns3xxx_timer_init(void) { cns3xxx_tmr1 = IOMEM(CNS3XXX_TIMER1_2_3_BASE_VIRT); __cns3xxx_timer_init(IRQ_CNS3XXX_TIMER0); } +struct sys_timer cns3xxx_timer = { + .init = cns3xxx_timer_init, +}; + #ifdef CONFIG_CACHE_L2X0 void __init cns3xxx_l2x0_init(void) diff --git a/trunk/arch/arm/mach-cns3xxx/core.h b/trunk/arch/arm/mach-cns3xxx/core.h index b23b17b4da10..4894b8c17151 100644 --- a/trunk/arch/arm/mach-cns3xxx/core.h +++ b/trunk/arch/arm/mach-cns3xxx/core.h @@ -11,7 +11,7 @@ #ifndef __CNS3XXX_CORE_H #define __CNS3XXX_CORE_H -extern void cns3xxx_timer_init(void); +extern struct sys_timer cns3xxx_timer; #ifdef CONFIG_CACHE_L2X0 void __init cns3xxx_l2x0_init(void); diff --git a/trunk/arch/arm/mach-davinci/board-da830-evm.c b/trunk/arch/arm/mach-davinci/board-da830-evm.c index e3742716cbaa..95b5e102ceb1 100644 --- a/trunk/arch/arm/mach-davinci/board-da830-evm.c +++ b/trunk/arch/arm/mach-davinci/board-da830-evm.c @@ -679,7 +679,7 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM") .atag_offset = 0x100, .map_io = da830_evm_map_io, .init_irq = cp_intc_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = da830_evm_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-da850-evm.c b/trunk/arch/arm/mach-davinci/board-da850-evm.c index 3b3356097bb0..0299915575a8 100644 --- a/trunk/arch/arm/mach-davinci/board-da850-evm.c +++ b/trunk/arch/arm/mach-davinci/board-da850-evm.c @@ -1599,7 +1599,7 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM") .atag_offset = 0x100, .map_io = da850_evm_map_io, .init_irq = cp_intc_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = da850_evm_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-dm355-evm.c b/trunk/arch/arm/mach-davinci/board-dm355-evm.c index 147b8e1a4407..cdf8d0746e79 100644 --- a/trunk/arch/arm/mach-davinci/board-dm355-evm.c +++ b/trunk/arch/arm/mach-davinci/board-dm355-evm.c @@ -355,7 +355,7 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM") .atag_offset = 0x100, .map_io = dm355_evm_map_io, .init_irq = davinci_irq_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = dm355_evm_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-dm355-leopard.c b/trunk/arch/arm/mach-davinci/board-dm355-leopard.c index 
dff4ddc5ef81..d41954507fc2 100644 --- a/trunk/arch/arm/mach-davinci/board-dm355-leopard.c +++ b/trunk/arch/arm/mach-davinci/board-dm355-leopard.c @@ -274,7 +274,7 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard") .atag_offset = 0x100, .map_io = dm355_leopard_map_io, .init_irq = davinci_irq_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = dm355_leopard_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-dm365-evm.c b/trunk/arch/arm/mach-davinci/board-dm365-evm.c index c2d4958a0cb6..5d49c75388ca 100644 --- a/trunk/arch/arm/mach-davinci/board-dm365-evm.c +++ b/trunk/arch/arm/mach-davinci/board-dm365-evm.c @@ -616,7 +616,7 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM") .atag_offset = 0x100, .map_io = dm365_evm_map_io, .init_irq = davinci_irq_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = dm365_evm_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-dm644x-evm.c b/trunk/arch/arm/mach-davinci/board-dm644x-evm.c index e4a16f98e6a2..f5e018de7fa5 100644 --- a/trunk/arch/arm/mach-davinci/board-dm644x-evm.c +++ b/trunk/arch/arm/mach-davinci/board-dm644x-evm.c @@ -825,7 +825,7 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM") .atag_offset = 0x100, .map_io = davinci_evm_map_io, .init_irq = davinci_irq_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = davinci_evm_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-dm646x-evm.c b/trunk/arch/arm/mach-davinci/board-dm646x-evm.c index de7adff324dc..6e2f1631df5b 100644 --- a/trunk/arch/arm/mach-davinci/board-dm646x-evm.c +++ b/trunk/arch/arm/mach-davinci/board-dm646x-evm.c @@ -818,7 +818,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM") .atag_offset = 0x100, .map_io = davinci_map_io, .init_irq = davinci_irq_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = evm_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, @@ -829,7 +829,7 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM") .atag_offset = 0x100, .map_io = davinci_map_io, .init_irq = davinci_irq_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = evm_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-mityomapl138.c b/trunk/arch/arm/mach-davinci/board-mityomapl138.c index b0df578bf744..43e4a0d663fa 100644 --- a/trunk/arch/arm/mach-davinci/board-mityomapl138.c +++ b/trunk/arch/arm/mach-davinci/board-mityomapl138.c @@ -570,7 +570,7 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808") .atag_offset = 0x100, .map_io = mityomapl138_map_io, .init_irq = cp_intc_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = mityomapl138_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-neuros-osd2.c b/trunk/arch/arm/mach-davinci/board-neuros-osd2.c index 1c98107527fa..3e3e3afebf88 100644 --- a/trunk/arch/arm/mach-davinci/board-neuros-osd2.c +++ b/trunk/arch/arm/mach-davinci/board-neuros-osd2.c @@ -237,7 +237,7 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2") .atag_offset = 0x100, .map_io = davinci_ntosd2_map_io, .init_irq = davinci_irq_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = davinci_ntosd2_init, .init_late = davinci_init_late, 
.dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-omapl138-hawk.c b/trunk/arch/arm/mach-davinci/board-omapl138-hawk.c index deb3922612b9..dc1208e9e664 100644 --- a/trunk/arch/arm/mach-davinci/board-omapl138-hawk.c +++ b/trunk/arch/arm/mach-davinci/board-omapl138-hawk.c @@ -341,7 +341,7 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard") .atag_offset = 0x100, .map_io = omapl138_hawk_map_io, .init_irq = cp_intc_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = omapl138_hawk_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-sffsdr.c b/trunk/arch/arm/mach-davinci/board-sffsdr.c index 739be7e738fe..6957787fa7f3 100644 --- a/trunk/arch/arm/mach-davinci/board-sffsdr.c +++ b/trunk/arch/arm/mach-davinci/board-sffsdr.c @@ -155,7 +155,7 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR") .atag_offset = 0x100, .map_io = davinci_sffsdr_map_io, .init_irq = davinci_irq_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = davinci_sffsdr_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/board-tnetv107x-evm.c b/trunk/arch/arm/mach-davinci/board-tnetv107x-evm.c index 4f416023d4e2..be3099733b1f 100644 --- a/trunk/arch/arm/mach-davinci/board-tnetv107x-evm.c +++ b/trunk/arch/arm/mach-davinci/board-tnetv107x-evm.c @@ -280,7 +280,7 @@ MACHINE_START(TNETV107X, "TNETV107X EVM") .atag_offset = 0x100, .map_io = tnetv107x_init, .init_irq = cp_intc_init, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = tnetv107x_evm_board_init, .init_late = davinci_init_late, .dma_zone_size = SZ_128M, diff --git a/trunk/arch/arm/mach-davinci/da8xx-dt.c b/trunk/arch/arm/mach-davinci/da8xx-dt.c index 9a7c76efc8f8..37c27af18fa0 100644 --- a/trunk/arch/arm/mach-davinci/da8xx-dt.c +++ b/trunk/arch/arm/mach-davinci/da8xx-dt.c @@ -56,7 +56,7 @@ static const char *da850_boards_compat[] __initdata = { DT_MACHINE_START(DA850_DT, "Generic DA850/OMAP-L138/AM18x") .map_io = da850_init, .init_irq = da8xx_init_irq, - .init_time = davinci_timer_init, + .timer = &davinci_timer, .init_machine = da850_init_machine, .dt_compat = da850_boards_compat, .init_late = davinci_init_late, diff --git a/trunk/arch/arm/mach-davinci/include/mach/common.h b/trunk/arch/arm/mach-davinci/include/mach/common.h index b124b77c90c5..046c7238a3d6 100644 --- a/trunk/arch/arm/mach-davinci/include/mach/common.h +++ b/trunk/arch/arm/mach-davinci/include/mach/common.h @@ -15,7 +15,9 @@ #include #include -extern void davinci_timer_init(void); +struct sys_timer; + +extern struct sys_timer davinci_timer; extern void davinci_irq_init(void); extern void __iomem *davinci_intc_base; diff --git a/trunk/arch/arm/mach-davinci/time.c b/trunk/arch/arm/mach-davinci/time.c index bad361ec1666..9847938785ca 100644 --- a/trunk/arch/arm/mach-davinci/time.c +++ b/trunk/arch/arm/mach-davinci/time.c @@ -337,7 +337,7 @@ static struct clock_event_device clockevent_davinci = { }; -void __init davinci_timer_init(void) +static void __init davinci_timer_init(void) { struct clk *timer_clk; struct davinci_soc_info *soc_info = &davinci_soc_info; @@ -410,6 +410,11 @@ void __init davinci_timer_init(void) timer32_config(&timers[i]); } +struct sys_timer davinci_timer = { + .init = davinci_timer_init, +}; + + /* reset board using watchdog timer */ void davinci_watchdog_reset(struct platform_device *pdev) { diff --git a/trunk/arch/arm/mach-dove/cm-a510.c 
b/trunk/arch/arm/mach-dove/cm-a510.c index 0dc39cf30fdd..792b4e2e24f1 100644 --- a/trunk/arch/arm/mach-dove/cm-a510.c +++ b/trunk/arch/arm/mach-dove/cm-a510.c @@ -92,6 +92,6 @@ MACHINE_START(CM_A510, "Compulab CM-A510 Board") .map_io = dove_map_io, .init_early = dove_init_early, .init_irq = dove_init_irq, - .init_time = dove_timer_init, + .timer = &dove_timer, .restart = dove_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-dove/common.c b/trunk/arch/arm/mach-dove/common.c index 0c7911b3e155..89f4f993cd03 100644 --- a/trunk/arch/arm/mach-dove/common.c +++ b/trunk/arch/arm/mach-dove/common.c @@ -242,13 +242,17 @@ static int __init dove_find_tclk(void) return 166666667; } -void __init dove_timer_init(void) +static void __init dove_timer_init(void) { dove_tclk = dove_find_tclk(); orion_time_init(BRIDGE_VIRT_BASE, BRIDGE_INT_TIMER1_CLR, IRQ_DOVE_BRIDGE, dove_tclk); } +struct sys_timer dove_timer = { + .init = dove_timer_init, +}; + /***************************************************************************** * Cryptographic Engines and Security Accelerator (CESA) ****************************************************************************/ @@ -450,7 +454,7 @@ DT_MACHINE_START(DOVE_DT, "Marvell Dove (Flattened Device Tree)") .map_io = dove_map_io, .init_early = dove_init_early, .init_irq = orion_dt_init_irq, - .init_time = dove_timer_init, + .timer = &dove_timer, .init_machine = dove_dt_init, .restart = dove_restart, .dt_compat = dove_dt_board_compat, diff --git a/trunk/arch/arm/mach-dove/common.h b/trunk/arch/arm/mach-dove/common.h index ee59fba4c6d1..1a233404b735 100644 --- a/trunk/arch/arm/mach-dove/common.h +++ b/trunk/arch/arm/mach-dove/common.h @@ -14,7 +14,7 @@ struct mv643xx_eth_platform_data; struct mv_sata_platform_data; -extern void dove_timer_init(void); +extern struct sys_timer dove_timer; /* * Basic Dove init functions used early by machine-setup. diff --git a/trunk/arch/arm/mach-dove/dove-db-setup.c b/trunk/arch/arm/mach-dove/dove-db-setup.c index 76e26f949c27..bc2867f11346 100644 --- a/trunk/arch/arm/mach-dove/dove-db-setup.c +++ b/trunk/arch/arm/mach-dove/dove-db-setup.c @@ -98,6 +98,6 @@ MACHINE_START(DOVE_DB, "Marvell DB-MV88AP510-BP Development Board") .map_io = dove_map_io, .init_early = dove_init_early, .init_irq = dove_init_irq, - .init_time = dove_timer_init, + .timer = &dove_timer, .restart = dove_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-ebsa110/core.c b/trunk/arch/arm/mach-ebsa110/core.c index b13cc74114db..f0fe6b5350e2 100644 --- a/trunk/arch/arm/mach-ebsa110/core.c +++ b/trunk/arch/arm/mach-ebsa110/core.c @@ -158,7 +158,7 @@ static void __init ebsa110_init_early(void) * interrupt, then the PIT counter will roll over (ie, be negative). * This actually works out to be convenient. */ -static u32 ebsa110_gettimeoffset(void) +static unsigned long ebsa110_gettimeoffset(void) { unsigned long offset, count; @@ -181,7 +181,7 @@ static u32 ebsa110_gettimeoffset(void) */ offset = offset * (1000000 / HZ) / COUNT; - return offset * 1000; + return offset; } static irqreturn_t @@ -213,10 +213,8 @@ static struct irqaction ebsa110_timer_irq = { /* * Set up timer interrupt. 
*/ -void __init ebsa110_timer_init(void) +static void __init ebsa110_timer_init(void) { - arch_gettimeoffset = ebsa110_gettimeoffset; - /* * Timer 1, mode 2, LSB/MSB */ @@ -227,6 +225,11 @@ void __init ebsa110_timer_init(void) setup_irq(IRQ_EBSA110_TIMER0, &ebsa110_timer_irq); } +static struct sys_timer ebsa110_timer = { + .init = ebsa110_timer_init, + .offset = ebsa110_gettimeoffset, +}; + static struct plat_serial8250_port serial_platform_data[] = { { .iobase = 0x3f8, @@ -325,6 +328,6 @@ MACHINE_START(EBSA110, "EBSA110") .map_io = ebsa110_map_io, .init_early = ebsa110_init_early, .init_irq = ebsa110_init_irq, - .init_time = ebsa110_timer_init, + .timer = &ebsa110_timer, .restart = ebsa110_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-ep93xx/adssphere.c b/trunk/arch/arm/mach-ep93xx/adssphere.c index bda6c3a5c923..41383bf03d4b 100644 --- a/trunk/arch/arm/mach-ep93xx/adssphere.c +++ b/trunk/arch/arm/mach-ep93xx/adssphere.c @@ -17,6 +17,7 @@ #include +#include #include #include @@ -38,7 +39,8 @@ MACHINE_START(ADSSPHERE, "ADS Sphere board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = adssphere_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, diff --git a/trunk/arch/arm/mach-ep93xx/core.c b/trunk/arch/arm/mach-ep93xx/core.c index c49ed3dc1aea..e85bf17f2d2a 100644 --- a/trunk/arch/arm/mach-ep93xx/core.c +++ b/trunk/arch/arm/mach-ep93xx/core.c @@ -34,7 +34,6 @@ #include #include #include -#include #include #include @@ -45,6 +44,8 @@ #include #include +#include + #include "soc.h" /************************************************************************* @@ -139,29 +140,11 @@ static struct irqaction ep93xx_timer_irq = { .handler = ep93xx_timer_interrupt, }; -static u32 ep93xx_gettimeoffset(void) -{ - int offset; - - offset = __raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time; - - /* - * Timer 4 is based on a 983.04 kHz reference clock, - * so dividing by 983040 gives the fraction of a second, - * so dividing by 0.983040 converts to uS. - * Refactor the calculation to avoid overflow. - * Finally, multiply by 1000 to give nS. - */ - return (offset + (53 * offset / 3072)) * 1000; -} - -void __init ep93xx_timer_init(void) +static void __init ep93xx_timer_init(void) { u32 tmode = EP93XX_TIMER123_CONTROL_MODE | EP93XX_TIMER123_CONTROL_CLKSEL; - arch_gettimeoffset = ep93xx_gettimeoffset; - /* Enable periodic HZ timer. */ __raw_writel(tmode, EP93XX_TIMER1_CONTROL); __raw_writel(TIMER1_RELOAD, EP93XX_TIMER1_LOAD); @@ -175,6 +158,21 @@ void __init ep93xx_timer_init(void) setup_irq(IRQ_EP93XX_TIMER1, &ep93xx_timer_irq); } +static unsigned long ep93xx_gettimeoffset(void) +{ + int offset; + + offset = __raw_readl(EP93XX_TIMER4_VALUE_LOW) - last_jiffy_time; + + /* Calculate (1000000 / 983040) * offset. 
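Both ep93xx_gettimeoffset() variants in the hunk above rest on the same arithmetic spelled out in the removed comment: timer 4 ticks at 983.04 kHz, and 1000000/983040 = 3125/3072 = 1 + 53/3072 exactly, so "offset + 53 * offset / 3072" yields microseconds without the 32-bit overflow a naive "offset * 1000000 / 983040" would risk. The removed variant then multiplies by 1000 to hand nanoseconds to arch_gettimeoffset, while the "+" side returns plain microseconds for the sys_timer .offset hook. A quick user-space check of that identity follows; it is not part of the patch, and the program and its names are illustrative only.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t ticks;

	for (ticks = 0; ticks < 983040; ticks++) {
		/* wide arithmetic: exact microseconds for 983.04 kHz ticks */
		uint64_t exact = (uint64_t)ticks * 1000000 / 983040;
		/* the overflow-free form used in the hunk above */
		uint32_t folded = ticks + 53 * ticks / 3072;

		if (folded != exact) {
			printf("mismatch at %u: %llu vs %u\n",
			       ticks, (unsigned long long)exact, folded);
			return 1;
		}
	}
	puts("ticks + 53*ticks/3072 == ticks*1000000/983040 for a full wrap");
	return 0;
}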
*/ + return offset + (53 * offset / 3072); +} + +struct sys_timer ep93xx_timer = { + .init = ep93xx_timer_init, + .offset = ep93xx_gettimeoffset, +}; + /************************************************************************* * EP93xx IRQ handling diff --git a/trunk/arch/arm/mach-ep93xx/edb93xx.c b/trunk/arch/arm/mach-ep93xx/edb93xx.c index 27b14ae92c7e..b8f53d57a299 100644 --- a/trunk/arch/arm/mach-ep93xx/edb93xx.c +++ b/trunk/arch/arm/mach-ep93xx/edb93xx.c @@ -39,6 +39,7 @@ #include #include +#include #include #include @@ -275,7 +276,8 @@ MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -288,7 +290,8 @@ MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -301,7 +304,8 @@ MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -314,7 +318,8 @@ MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -327,7 +332,8 @@ MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -340,7 +346,8 @@ MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -353,7 +360,8 @@ MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -366,7 +374,8 @@ MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = edb93xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, diff --git a/trunk/arch/arm/mach-ep93xx/gesbc9312.c b/trunk/arch/arm/mach-ep93xx/gesbc9312.c index 0cca5b183309..7fd705b5efe4 100644 --- a/trunk/arch/arm/mach-ep93xx/gesbc9312.c +++ b/trunk/arch/arm/mach-ep93xx/gesbc9312.c @@ -17,6 
+17,7 @@ #include +#include #include #include @@ -38,7 +39,8 @@ MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = gesbc9312_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, diff --git a/trunk/arch/arm/mach-ep93xx/include/mach/platform.h b/trunk/arch/arm/mach-ep93xx/include/mach/platform.h index a14e1b37beff..33a5122c6dc8 100644 --- a/trunk/arch/arm/mach-ep93xx/include/mach/platform.h +++ b/trunk/arch/arm/mach-ep93xx/include/mach/platform.h @@ -53,7 +53,7 @@ int ep93xx_ide_acquire_gpio(struct platform_device *pdev); void ep93xx_ide_release_gpio(struct platform_device *pdev); void ep93xx_init_devices(void); -extern void ep93xx_timer_init(void); +extern struct sys_timer ep93xx_timer; void ep93xx_restart(char, const char *); void ep93xx_init_late(void); diff --git a/trunk/arch/arm/mach-ep93xx/micro9.c b/trunk/arch/arm/mach-ep93xx/micro9.c index 373583c29825..3d7cdab725b2 100644 --- a/trunk/arch/arm/mach-ep93xx/micro9.c +++ b/trunk/arch/arm/mach-ep93xx/micro9.c @@ -18,6 +18,7 @@ #include +#include #include #include @@ -81,7 +82,8 @@ MACHINE_START(MICRO9, "Contec Micro9-High") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = micro9_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -94,7 +96,8 @@ MACHINE_START(MICRO9M, "Contec Micro9-Mid") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = micro9_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -107,7 +110,8 @@ MACHINE_START(MICRO9L, "Contec Micro9-Lite") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = micro9_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, @@ -120,7 +124,8 @@ MACHINE_START(MICRO9S, "Contec Micro9-Slim") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = micro9_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, diff --git a/trunk/arch/arm/mach-ep93xx/simone.c b/trunk/arch/arm/mach-ep93xx/simone.c index 36f22c1a31fe..0eb3f17a6fa2 100644 --- a/trunk/arch/arm/mach-ep93xx/simone.c +++ b/trunk/arch/arm/mach-ep93xx/simone.c @@ -25,6 +25,7 @@ #include #include +#include #include #include @@ -82,7 +83,8 @@ MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board") .atag_offset = 0x100, .map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = simone_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, diff --git a/trunk/arch/arm/mach-ep93xx/snappercl15.c b/trunk/arch/arm/mach-ep93xx/snappercl15.c index aa86f86638dd..50043eef1cf2 100644 --- a/trunk/arch/arm/mach-ep93xx/snappercl15.c +++ b/trunk/arch/arm/mach-ep93xx/snappercl15.c @@ -31,6 +31,7 @@ #include #include +#include #include #include @@ -175,7 +176,8 @@ MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15") .atag_offset = 0x100, 
.map_io = ep93xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = snappercl15_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, diff --git a/trunk/arch/arm/mach-ep93xx/ts72xx.c b/trunk/arch/arm/mach-ep93xx/ts72xx.c index 61f4b5dc4d7d..3c4c233391dc 100644 --- a/trunk/arch/arm/mach-ep93xx/ts72xx.c +++ b/trunk/arch/arm/mach-ep93xx/ts72xx.c @@ -22,6 +22,7 @@ #include +#include #include #include #include @@ -245,7 +246,8 @@ MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC") .atag_offset = 0x100, .map_io = ts72xx_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = ts72xx_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, diff --git a/trunk/arch/arm/mach-ep93xx/vision_ep9307.c b/trunk/arch/arm/mach-ep93xx/vision_ep9307.c index 605956fd07a2..ba92e25e3016 100644 --- a/trunk/arch/arm/mach-ep93xx/vision_ep9307.c +++ b/trunk/arch/arm/mach-ep93xx/vision_ep9307.c @@ -34,6 +34,7 @@ #include #include +#include #include #include #include @@ -363,7 +364,8 @@ MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307") .atag_offset = 0x100, .map_io = vision_map_io, .init_irq = ep93xx_init_irq, - .init_time = ep93xx_timer_init, + .handle_irq = vic_handle_irq, + .timer = &ep93xx_timer, .init_machine = vision_init_machine, .init_late = ep93xx_init_late, .restart = ep93xx_restart, diff --git a/trunk/arch/arm/mach-exynos/Kconfig b/trunk/arch/arm/mach-exynos/Kconfig index 85afb031b676..e103c290bc9e 100644 --- a/trunk/arch/arm/mach-exynos/Kconfig +++ b/trunk/arch/arm/mach-exynos/Kconfig @@ -414,7 +414,7 @@ config MACH_EXYNOS4_DT select CPU_EXYNOS4210 select HAVE_SAMSUNG_KEYPAD if INPUT_KEYBOARD select PINCTRL - select PINCTRL_EXYNOS + select PINCTRL_EXYNOS4 select USE_OF help Machine support for Samsung Exynos4 machine with device tree enabled. 
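Most hunks in this series are the same mechanical swap: the machine descriptor's bare .init_time function pointer becomes a .timer pointer to a struct sys_timer whose .init (and, for ebsa110/ep93xx, .offset) callbacks the core invokes instead. The sketch below is a compilable user-space model of the two shapes, not kernel code; the demo board, main() and printed strings are invented for illustration.

#include <stdio.h>

/* The older interface restored by these hunks: a bundle of callbacks the
 * timekeeping core calls through, rather than bare function pointers. */
struct sys_timer {
	void (*init)(void);		/* program the timer hardware at boot */
	unsigned long (*offset)(void);	/* optional: microseconds since last tick */
};

/* Simplified machine descriptor carrying both generations of hook so the
 * difference is visible side by side; a real kernel has one or the other. */
struct machine_desc {
	const char *name;
	void (*init_time)(void);	/* newer form: .init_time = foo_timer_init */
	const struct sys_timer *timer;	/* older form: .timer = &foo_timer */
};

static void demo_timer_init(void)      { puts("timer hardware programmed"); }
static unsigned long demo_offset(void) { return 125; /* pretend 125 us elapsed */ }

static const struct sys_timer demo_timer = {
	.init   = demo_timer_init,
	.offset = demo_offset,
};

static const struct machine_desc demo_board = {
	.name  = "demo board",
	.timer = &demo_timer,
};

int main(void)
{
	/* roughly what the core does at boot ... */
	if (demo_board.timer && demo_board.timer->init)
		demo_board.timer->init();
	else if (demo_board.init_time)
		demo_board.init_time();

	/* ... and when it wants a sub-tick timestamp */
	if (demo_board.timer && demo_board.timer->offset)
		printf("%s: %lu us past the last tick\n",
		       demo_board.name, demo_board.timer->offset());
	return 0;
}

Bundling the callbacks in one struct lets each platform export a single symbol (at91rm9200_timer, ep93xx_timer, dove_timer, ...) instead of one function per hook, which is exactly what the generic.h/common.h hunks above switch the extern declarations to.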
diff --git a/trunk/arch/arm/mach-exynos/common.c b/trunk/arch/arm/mach-exynos/common.c index 4ea80bc4ef9b..1a89824a5f78 100644 --- a/trunk/arch/arm/mach-exynos/common.c +++ b/trunk/arch/arm/mach-exynos/common.c @@ -22,13 +22,12 @@ #include #include #include -#include #include -#include #include #include #include +#include #include #include #include @@ -645,6 +644,8 @@ static int __init combiner_of_init(struct device_node *np, } static const struct of_device_id exynos_dt_irq_match[] = { + { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, }, + { .compatible = "arm,cortex-a15-gic", .data = gic_of_init, }, { .compatible = "samsung,exynos4210-combiner", .data = combiner_of_init, }, {}, @@ -660,10 +661,8 @@ void __init exynos4_init_irq(void) if (!of_have_populated_dt()) gic_init_bases(0, IRQ_PPI(0), S5P_VA_GIC_DIST, S5P_VA_GIC_CPU, gic_bank_offset, NULL); #ifdef CONFIG_OF - else { - irqchip_init(); + else of_irq_init(exynos_dt_irq_match); - } #endif if (!of_have_populated_dt()) @@ -680,7 +679,6 @@ void __init exynos4_init_irq(void) void __init exynos5_init_irq(void) { #ifdef CONFIG_OF - irqchip_init(); of_irq_init(exynos_dt_irq_match); #endif /* diff --git a/trunk/arch/arm/mach-exynos/common.h b/trunk/arch/arm/mach-exynos/common.h index 12f2f1117e99..04744f9c120f 100644 --- a/trunk/arch/arm/mach-exynos/common.h +++ b/trunk/arch/arm/mach-exynos/common.h @@ -12,7 +12,7 @@ #ifndef __ARCH_ARM_MACH_EXYNOS_COMMON_H #define __ARCH_ARM_MACH_EXYNOS_COMMON_H -extern void exynos4_timer_init(void); +extern struct sys_timer exynos4_timer; struct map_desc; void exynos_init_io(struct map_desc *mach_desc, int size); diff --git a/trunk/arch/arm/mach-exynos/include/mach/regs-irq.h b/trunk/arch/arm/mach-exynos/include/mach/regs-irq.h index f2b50506b9f6..9c7b4bfd546f 100644 --- a/trunk/arch/arm/mach-exynos/include/mach/regs-irq.h +++ b/trunk/arch/arm/mach-exynos/include/mach/regs-irq.h @@ -13,7 +13,7 @@ #ifndef __ASM_ARCH_REGS_IRQ_H #define __ASM_ARCH_REGS_IRQ_H __FILE__ -#include +#include #include #endif /* __ASM_ARCH_REGS_IRQ_H */ diff --git a/trunk/arch/arm/mach-exynos/mach-armlex4210.c b/trunk/arch/arm/mach-exynos/mach-armlex4210.c index 685f29173afa..b938f9fc1dd1 100644 --- a/trunk/arch/arm/mach-exynos/mach-armlex4210.c +++ b/trunk/arch/arm/mach-exynos/mach-armlex4210.c @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -200,8 +201,9 @@ MACHINE_START(ARMLEX4210, "ARMLEX4210") .smp = smp_ops(exynos_smp_ops), .init_irq = exynos4_init_irq, .map_io = armlex4210_map_io, + .handle_irq = gic_handle_irq, .init_machine = armlex4210_machine_init, .init_late = exynos_init_late, - .init_time = exynos4_timer_init, + .timer = &exynos4_timer, .restart = exynos4_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-exynos/mach-exynos4-dt.c b/trunk/arch/arm/mach-exynos/mach-exynos4-dt.c index 112d10e53d20..92757ff817ae 100644 --- a/trunk/arch/arm/mach-exynos/mach-exynos4-dt.c +++ b/trunk/arch/arm/mach-exynos/mach-exynos4-dt.c @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -106,9 +107,10 @@ DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)") .smp = smp_ops(exynos_smp_ops), .init_irq = exynos4_init_irq, .map_io = exynos4_dt_map_io, + .handle_irq = gic_handle_irq, .init_machine = exynos4_dt_machine_init, .init_late = exynos_init_late, - .init_time = exynos4_timer_init, + .timer = &exynos4_timer, .dt_compat = exynos4_dt_compat, .restart = exynos4_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-exynos/mach-exynos5-dt.c 
b/trunk/arch/arm/mach-exynos/mach-exynos5-dt.c index 0deeecffa3ae..e99d3d8f2bcf 100644 --- a/trunk/arch/arm/mach-exynos/mach-exynos5-dt.c +++ b/trunk/arch/arm/mach-exynos/mach-exynos5-dt.c @@ -16,6 +16,7 @@ #include #include +#include #include #include @@ -178,9 +179,10 @@ DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)") .init_irq = exynos5_init_irq, .smp = smp_ops(exynos_smp_ops), .map_io = exynos5_dt_map_io, + .handle_irq = gic_handle_irq, .init_machine = exynos5_dt_machine_init, .init_late = exynos_init_late, - .init_time = exynos4_timer_init, + .timer = &exynos4_timer, .dt_compat = exynos5_dt_compat, .restart = exynos5_restart, .reserve = exynos5_reserve, diff --git a/trunk/arch/arm/mach-exynos/mach-nuri.c b/trunk/arch/arm/mach-exynos/mach-nuri.c index b8b3fbf0bae7..27d4ed8b116e 100644 --- a/trunk/arch/arm/mach-exynos/mach-nuri.c +++ b/trunk/arch/arm/mach-exynos/mach-nuri.c @@ -39,6 +39,7 @@ #include #include +#include #include #include @@ -1378,9 +1379,10 @@ MACHINE_START(NURI, "NURI") .smp = smp_ops(exynos_smp_ops), .init_irq = exynos4_init_irq, .map_io = nuri_map_io, + .handle_irq = gic_handle_irq, .init_machine = nuri_machine_init, .init_late = exynos_init_late, - .init_time = exynos4_timer_init, + .timer = &exynos4_timer, .reserve = &nuri_reserve, .restart = exynos4_restart, MACHINE_END diff --git a/trunk/arch/arm/mach-exynos/mach-origen.c b/trunk/arch/arm/mach-exynos/mach-origen.c index 579d2d171daa..5e34b9c16196 100644 --- a/trunk/arch/arm/mach-exynos/mach-origen.c +++ b/trunk/arch/arm/mach-exynos/mach-origen.c @@ -29,6 +29,7 @@ #include #include +#include #include #include
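The exynos and bcm hunks drop the generic irqchip_init() and instead list each interrupt controller's compatible string next to its init hook in an of_device_id table handed to of_irq_init(). Below is a rough user-space model of that dispatch, not the kernel implementation: the node list, struct names and main() are made up, and the real of_irq_init() additionally orders parent controllers before their children.

#include <stdio.h>
#include <string.h>

struct fake_node { const char *compatible; };

struct irq_match_entry {
	const char *compatible;
	int (*init)(const struct fake_node *node);
};

static int gic_init(const struct fake_node *node)
{
	printf("GIC set up for node '%s'\n", node->compatible);
	return 0;
}

static int combiner_init(const struct fake_node *node)
{
	printf("combiner set up for node '%s'\n", node->compatible);
	return 0;
}

/* mirrors the shape of exynos_dt_irq_match[] / irq_match[] in the hunks above */
static const struct irq_match_entry irq_match[] = {
	{ .compatible = "arm,cortex-a9-gic",           .init = gic_init },
	{ .compatible = "samsung,exynos4210-combiner", .init = combiner_init },
	{ }	/* sentinel */
};

/* crude stand-in for of_irq_init(): call the matching initializer per node */
static void fake_of_irq_init(const struct irq_match_entry *table,
			     const struct fake_node *nodes, int n)
{
	for (int i = 0; i < n; i++)
		for (const struct irq_match_entry *m = table; m->compatible; m++)
			if (!strcmp(m->compatible, nodes[i].compatible)) {
				m->init(&nodes[i]);
				break;
			}
}

int main(void)
{
	const struct fake_node nodes[] = {
		{ "arm,cortex-a9-gic" },
		{ "samsung,exynos4210-combiner" },
	};

	fake_of_irq_init(irq_match, nodes, 2);
	return 0;
}

Keeping the match table in the platform code, rather than relying on the generic irqchip_init(), is the older arrangement these hunks restore.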