
Commit

Merge branch 'kvm-arm-fixes' of git://github.com/columbia/linux-kvm-arm into devel-stable
Russell King committed Mar 15, 2013
2 parents 73a09d2 + f42798c commit 0098fc3
Showing 19 changed files with 585 additions and 387 deletions.
4 changes: 4 additions & 0 deletions arch/arm/include/asm/kvm_arm.h
@@ -211,4 +211,8 @@

#define HSR_HVC_IMM_MASK ((1UL << 16) - 1)

#define HSR_DABT_S1PTW (1U << 7)
#define HSR_DABT_CM (1U << 8)
#define HSR_DABT_EA (1U << 9)

#endif /* __ARM_KVM_ARM_H__ */
2 changes: 1 addition & 1 deletion arch/arm/include/asm/kvm_asm.h
@@ -75,7 +75,7 @@ extern char __kvm_hyp_code_end[];
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);

extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
#endif
107 changes: 100 additions & 7 deletions arch/arm/include/asm/kvm_emulate.h
@@ -22,11 +22,12 @@
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_arm.h>

u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);

int kvm_handle_wfi(struct kvm_vcpu *vcpu, struct kvm_run *run);
bool kvm_condition_valid(struct kvm_vcpu *vcpu);
void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr);
void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -37,14 +38,14 @@ static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
return 1;
}

static inline u32 *vcpu_pc(struct kvm_vcpu *vcpu)
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
return (u32 *)&vcpu->arch.regs.usr_regs.ARM_pc;
return &vcpu->arch.regs.usr_regs.ARM_pc;
}

static inline u32 *vcpu_cpsr(struct kvm_vcpu *vcpu)
static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
return (u32 *)&vcpu->arch.regs.usr_regs.ARM_cpsr;
return &vcpu->arch.regs.usr_regs.ARM_cpsr;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -69,4 +70,96 @@ static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
return reg == 15;
}

static inline u32 kvm_vcpu_get_hsr(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hsr;
}

static inline unsigned long kvm_vcpu_get_hfar(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hxfar;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(struct kvm_vcpu *vcpu)
{
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}

static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.hyp_pc;
}

static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
}

static inline bool kvm_vcpu_dabt_iswrite(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_WNR;
}

static inline bool kvm_vcpu_dabt_issext(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_SSE;
}

static inline int kvm_vcpu_dabt_get_rd(struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & HSR_SRT_MASK) >> HSR_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_EA;
}

static inline bool kvm_vcpu_dabt_iss1tw(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}

/* Get Access Size from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
switch ((kvm_vcpu_get_hsr(vcpu) >> 22) & 0x3) {
case 0:
return 1;
case 1:
return 2;
case 2:
return 4;
default:
kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
return -EFAULT;
}
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_IL;
}

static inline u8 kvm_vcpu_trap_get_class(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) >> HSR_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_trap_get_class(vcpu) == HSR_EC_IABT;
}

static inline u8 kvm_vcpu_trap_get_fault(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_FSC_TYPE;
}

static inline u32 kvm_vcpu_hvc_get_imm(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_HVC_IMM_MASK;
}

#endif /* __ARM_KVM_EMULATE_H__ */
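
Illustrative aside (not part of this commit): the new accessors above are enough to decode a guest data abort without touching the raw HSR bits directly. A minimal sketch of how a fault handler might use them follows; the function name decode_dabt and the kvm_debug reporting are ours, not taken from the commit.

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>

static int decode_dabt(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	int len;
	bool is_write, sign_extend;
	unsigned long rt;

	/* Without a valid ISS the abort cannot be decoded in software. */
	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -EFAULT;

	/* External aborts and stage-1 page-table walks are handled elsewhere. */
	if (kvm_vcpu_dabt_isextabt(vcpu) || kvm_vcpu_dabt_iss1tw(vcpu))
		return -EFAULT;

	len = kvm_vcpu_dabt_get_as(vcpu);	/* access size: 1, 2 or 4 bytes */
	if (len < 0)
		return len;

	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);	/* register being transferred */

	kvm_debug("data abort at IPA %pa: %s, len %d, Rt %lu, sext %d\n",
		  &fault_ipa, is_write ? "write" : "read", len, rt, sign_extend);
	return 0;
}
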
42 changes: 34 additions & 8 deletions arch/arm/include/asm/kvm_host.h
@@ -80,6 +80,15 @@ struct kvm_mmu_memory_cache {
void *objects[KVM_NR_MEM_OBJS];
};

struct kvm_vcpu_fault_info {
u32 hsr; /* Hyp Syndrome Register */
u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
u32 hpfar; /* Hyp IPA Fault Address Register */
u32 hyp_pc; /* PC when exception was taken from Hyp mode */
};

typedef struct vfp_hard_struct kvm_kernel_vfp_t;

struct kvm_vcpu_arch {
struct kvm_regs regs;

@@ -93,13 +102,11 @@ struct kvm_vcpu_arch {
u32 midr;

/* Exception Information */
u32 hsr; /* Hyp Syndrome Register */
u32 hxfar; /* Hyp Data/Inst Fault Address Register */
u32 hpfar; /* Hyp IPA Fault Address Register */
struct kvm_vcpu_fault_info fault;

/* Floating point registers (VFP and Advanced SIMD/NEON) */
struct vfp_hard_struct vfp_guest;
struct vfp_hard_struct *vfp_host;
kvm_kernel_vfp_t vfp_guest;
kvm_kernel_vfp_t *vfp_host;

/* VGIC state */
struct vgic_cpu vgic_cpu;
@@ -122,9 +129,6 @@ struct kvm_vcpu_arch {
/* Interrupt related fields */
u32 irq_lines; /* IRQ and FIQ levels */

/* Hyp exception information */
u32 hyp_pc; /* PC when exception was taken from Hyp mode */

/* Cache some mmu pages needed inside spinlock regions */
struct kvm_mmu_memory_cache mmu_page_cache;

@@ -181,4 +185,26 @@ struct kvm_one_reg;
int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);
int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *);

int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
int exception_index);

static inline void __cpu_init_hyp_mode(unsigned long long pgd_ptr,
unsigned long hyp_stack_ptr,
unsigned long vector_ptr)
{
unsigned long pgd_low, pgd_high;

pgd_low = (pgd_ptr & ((1ULL << 32) - 1));
pgd_high = (pgd_ptr >> 32ULL);

/*
* Call initialization code, and switch to the full blown
* HYP code. The init code doesn't need to preserve these registers as
* r1-r3 and r12 are already callee save according to the AAPCS.
* Note that we slightly misuse the prototype by casting the pgd_low to
* a void *.
*/
kvm_call_hyp((void *)pgd_low, pgd_high, hyp_stack_ptr, vector_ptr);
}

#endif /* __ARM_KVM_HOST_H__ */
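
Illustrative aside (not part of this commit): a rough sketch of how a per-CPU init path could feed __cpu_init_hyp_mode() above. The HTTBR value comes from kvm_mmu_get_httbr() (declared in kvm_mmu.h below); the stack and vector arguments and the function name are placeholders.

#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>

static void example_cpu_init_hyp_mode(unsigned long hyp_stack_top,
				      unsigned long hyp_vector_base)
{
	/* 64-bit HTTBR value for the HYP translation regime. */
	unsigned long long pgd_ptr = kvm_mmu_get_httbr();

	/*
	 * The helper splits pgd_ptr into two 32-bit halves (roughly r0/r1
	 * for the HYP init code) and passes the stack and vector pointers
	 * in the remaining argument registers.
	 */
	__cpu_init_hyp_mode(pgd_ptr, hyp_stack_top, hyp_vector_base);
}
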
67 changes: 67 additions & 0 deletions arch/arm/include/asm/kvm_mmu.h
@@ -19,6 +19,18 @@
#ifndef __ARM_KVM_MMU_H__
#define __ARM_KVM_MMU_H__

#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
#include <asm/idmap.h>

/*
* We directly use the kernel VA for the HYP, as we can directly share
* the mapping (HTTBR "covers" TTBR1).
*/
#define HYP_PAGE_OFFSET_MASK (~0UL)
#define HYP_PAGE_OFFSET PAGE_OFFSET
#define KERN_TO_HYP(kva) (kva)

int create_hyp_mappings(void *from, void *to);
int create_hyp_io_mappings(void *from, void *to, phys_addr_t);
void free_hyp_pmds(void);
@@ -36,6 +48,16 @@ phys_addr_t kvm_mmu_get_httbr(void);
int kvm_mmu_init(void);
void kvm_clear_hyp_idmap(void);

static inline void kvm_set_pte(pte_t *pte, pte_t new_pte)
{
pte_val(*pte) = new_pte;
/*
* flush_pmd_entry just takes a void pointer and cleans the necessary
* cache entries, so we can reuse the function for ptes.
*/
flush_pmd_entry(pte);
}

static inline bool kvm_is_write_fault(unsigned long hsr)
{
unsigned long hsr_ec = hsr >> HSR_EC_SHIFT;
@@ -47,4 +69,49 @@ static inline bool kvm_is_write_fault(unsigned long hsr)
return true;
}

static inline void kvm_clean_pgd(pgd_t *pgd)
{
clean_dcache_area(pgd, PTRS_PER_S2_PGD * sizeof(pgd_t));
}

static inline void kvm_clean_pmd_entry(pmd_t *pmd)
{
clean_pmd_entry(pmd);
}

static inline void kvm_clean_pte(pte_t *pte)
{
clean_pte_table(pte);
}

static inline void kvm_set_s2pte_writable(pte_t *pte)
{
pte_val(*pte) |= L_PTE_S2_RDWR;
}

struct kvm;

static inline void coherent_icache_guest_page(struct kvm *kvm, gfn_t gfn)
{
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
* (or another VM) may have used the same page as this guest, and we
* read incorrect data from the icache. If we're using a PIPT cache,
* we can invalidate just that page, but if we are using a VIPT cache
* we need to invalidate the entire icache - damn shame - as written
* in the ARM ARM (DDI 0406C.b - Page B3-1393).
*
* VIVT caches are tagged using both the ASID and the VMID and don't
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
*/
if (icache_is_pipt()) {
unsigned long hva = gfn_to_hva(kvm, gfn);
__cpuc_coherent_user_range(hva, hva + PAGE_SIZE);
} else if (!icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
}
}

#endif /* __ARM_KVM_MMU_H__ */
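
Illustrative aside (not part of this commit): the stage-2 helpers above are meant to be combined when a guest page is mapped in after a fault. A simplified sketch follows; PAGE_S2 and pfn_pte() come from the ARM page-table headers rather than from this hunk, and the function name is made up.

#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
#include <asm/pgtable.h>

static void example_map_guest_page(struct kvm *kvm, pte_t *ptep,
				   unsigned long pfn, gfn_t gfn, bool writable)
{
	/* Start from read-only stage-2 attributes and upgrade if needed. */
	pte_t new_pte = pfn_pte(pfn, PAGE_S2);

	if (writable)
		kvm_set_s2pte_writable(&new_pte);

	/* Avoid the guest executing stale icache lines for this page. */
	coherent_icache_guest_page(kvm, gfn);

	/* Install the PTE and clean it so the stage-2 walker sees it. */
	kvm_set_pte(ptep, new_pte);
}
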
1 change: 0 additions & 1 deletion arch/arm/include/asm/kvm_vgic.h
@@ -21,7 +21,6 @@

#include <linux/kernel.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/irqreturn.h>
#include <linux/spinlock.h>
#include <linux/types.h>
12 changes: 6 additions & 6 deletions arch/arm/include/uapi/asm/kvm.h
@@ -53,12 +53,12 @@
#define KVM_ARM_FIQ_spsr fiq_regs[7]

struct kvm_regs {
struct pt_regs usr_regs;/* R0_usr - R14_usr, PC, CPSR */
__u32 svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
__u32 abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
__u32 und_regs[3]; /* SP_und, LR_und, SPSR_und */
__u32 irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
__u32 fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
struct pt_regs usr_regs; /* R0_usr - R14_usr, PC, CPSR */
unsigned long svc_regs[3]; /* SP_svc, LR_svc, SPSR_svc */
unsigned long abt_regs[3]; /* SP_abt, LR_abt, SPSR_abt */
unsigned long und_regs[3]; /* SP_und, LR_und, SPSR_und */
unsigned long irq_regs[3]; /* SP_irq, LR_irq, SPSR_irq */
unsigned long fiq_regs[8]; /* R8_fiq - R14_fiq, SPSR_fiq */
};

/* Supported Processor Types */
8 changes: 4 additions & 4 deletions arch/arm/kernel/asm-offsets.c
@@ -165,10 +165,10 @@ int main(void)
DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.hsr));
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.hxfar));
DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.hpfar));
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.hyp_pc));
DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
#ifdef CONFIG_KVM_ARM_VGIC
DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
DEFINE(VGIC_CPU_HCR, offsetof(struct vgic_cpu, vgic_hcr));
2 changes: 1 addition & 1 deletion arch/arm/kvm/Makefile
@@ -17,7 +17,7 @@ AFLAGS_interrupts.o := -Wa,-march=armv7-a$(plus_virt)
kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o guest.o mmu.o emulate.o reset.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o mmio.o psci.o
obj-$(CONFIG_KVM_ARM_VGIC) += vgic.o
obj-$(CONFIG_KVM_ARM_TIMER) += arch_timer.o
(The remaining 10 changed files are not shown here.)
