
Commit

---
yaml
---
r: 351186
b: refs/heads/master
c: 45e96ea
h: refs/heads/master
v: v3
Christoffer Dall committed Jan 23, 2013
1 parent 4966ea6 commit 4a09700
Showing 10 changed files with 256 additions and 4 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 94f8e6418d3915dbefbb5d66b63146f1df12b0c0
refs/heads/master: 45e96ea6b369539a37040a8df9c59a39f073d9d6
3 changes: 3 additions & 0 deletions trunk/arch/arm/include/asm/kvm_arm.h
@@ -173,8 +173,11 @@
#define HSR_ISS (HSR_IL - 1)
#define HSR_ISV_SHIFT (24)
#define HSR_ISV (1U << HSR_ISV_SHIFT)
#define HSR_SRT_SHIFT (16)
#define HSR_SRT_MASK (0xf << HSR_SRT_SHIFT)
#define HSR_FSC (0x3f)
#define HSR_FSC_TYPE (0x3c)
#define HSR_SSE (1 << 21)
#define HSR_WNR (1 << 6)
#define HSR_CV_SHIFT (24)
#define HSR_CV (1U << HSR_CV_SHIFT)
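Note: the new HSR_SRT_*, HSR_SSE, HSR_WNR and HSR_FSC* definitions describe the instruction syndrome the hardware reports for data aborts. Below is a minimal sketch, not part of the commit, of pulling those fields out of a raw syndrome value; decode_hsr() in mmio.c further down is the real consumer.

/* Sketch only: decode a hypothetical data-abort syndrome with the new masks. */
static void decode_dabt_sketch(u32 hsr)
{
        bool valid = hsr & HSR_ISV;                                /* syndrome fields are valid */
        bool is_write = hsr & HSR_WNR;                             /* write, not read */
        bool sign_extend = hsr & HSR_SSE;                          /* sign-extend narrow loads */
        unsigned long rt = (hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;  /* Rt register number */
        u32 fsc = hsr & HSR_FSC_TYPE;                              /* fault status class */

        (void)valid; (void)is_write; (void)sign_extend; (void)rt; (void)fsc;
}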
6 changes: 6 additions & 0 deletions trunk/arch/arm/include/asm/kvm_emulate.h
@@ -21,6 +21,7 @@

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>

u32 *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
u32 *vcpu_spsr(struct kvm_vcpu *vcpu);
@@ -53,4 +54,9 @@ static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
        return cpsr_mode > USR_MODE;
}

static inline bool kvm_vcpu_reg_is_pc(struct kvm_vcpu *vcpu, int reg)
{
        return reg == 15;
}

#endif /* __ARM_KVM_EMULATE_H__ */
4 changes: 4 additions & 0 deletions trunk/arch/arm/include/asm/kvm_host.h
@@ -21,6 +21,7 @@

#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
#include <asm/fpstate.h>

#define KVM_MAX_VCPUS CONFIG_KVM_ARM_MAX_VCPUS
@@ -99,6 +100,9 @@ struct kvm_vcpu_arch {
        int last_pcpu;
        cpumask_t require_dcache_flush;

        /* IO related fields */
        struct kvm_decode mmio_decode;

        /* Interrupt related fields */
        u32 irq_lines; /* IRQ and FIQ levels */

56 changes: 56 additions & 0 deletions trunk/arch/arm/include/asm/kvm_mmio.h
@@ -0,0 +1,56 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#ifndef __ARM_KVM_MMIO_H__
#define __ARM_KVM_MMIO_H__

#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>

struct kvm_decode {
        unsigned long rt;
        bool sign_extend;
};

/*
 * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
 * which is an anonymous type. Use our own type instead.
 */
struct kvm_exit_mmio {
        phys_addr_t phys_addr;
        u8 data[8];
        u32 len;
        bool is_write;
};

static inline void kvm_prepare_mmio(struct kvm_run *run,
                                    struct kvm_exit_mmio *mmio)
{
        run->mmio.phys_addr = mmio->phys_addr;
        run->mmio.len = mmio->len;
        run->mmio.is_write = mmio->is_write;
        memcpy(run->mmio.data, mmio->data, mmio->len);
        run->exit_reason = KVM_EXIT_MMIO;
}

int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 phys_addr_t fault_ipa);

#endif /* __ARM_KVM_MMIO_H__ */
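Note: struct kvm_exit_mmio deliberately mirrors the anonymous run->mmio member, so in-kernel emulation can stage an access and only copy it into the run structure once user space has to finish it. A minimal usage sketch, not part of the commit; the device address is made up:

/* Sketch only: forward a 4-byte read at a hypothetical device address. */
struct kvm_exit_mmio mmio = {
        .phys_addr = 0x10000000,        /* made-up device address */
        .len = 4,
        .is_write = false,
};
kvm_prepare_mmio(run, &mmio);           /* fills run->mmio and sets KVM_EXIT_MMIO */

io_mem_abort() in mmio.c below is the in-tree caller.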
2 changes: 1 addition & 1 deletion trunk/arch/arm/kvm/Makefile
@@ -18,4 +18,4 @@ kvm-arm-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o)

obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o
obj-y += coproc.o coproc_a15.o mmio.o
6 changes: 6 additions & 0 deletions trunk/arch/arm/kvm/arm.c
@@ -616,6 +616,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
        if (ret)
                return ret;

        if (run->exit_reason == KVM_EXIT_MMIO) {
                ret = kvm_handle_mmio_return(vcpu, vcpu->run);
                if (ret)
                        return ret;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

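Note: with this hunk, an MMIO load that was punted to user space is completed on the next KVM_RUN: the VMM fills run->mmio.data and re-enters, and kvm_handle_mmio_return() (added in mmio.c below) copies the value into the guest register before the guest resumes. A rough sketch of the user-space side of that contract, not part of the commit; vcpu_fd and run (the mmap'ed kvm_run structure) are assumed to be set up already, and device_read()/device_write() are hypothetical placeholders for the VMM's device model:

/* Sketch only: a minimal VMM run loop reacting to KVM_EXIT_MMIO. */
for (;;) {
        ioctl(vcpu_fd, KVM_RUN, 0);

        if (run->exit_reason == KVM_EXIT_MMIO) {
                if (run->mmio.is_write)
                        device_write(run->mmio.phys_addr,
                                     run->mmio.data, run->mmio.len);
                else
                        /* data is copied into the guest register by
                         * kvm_handle_mmio_return() on re-entry */
                        device_read(run->mmio.phys_addr,
                                    run->mmio.data, run->mmio.len);
        }
}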
153 changes: 153 additions & 0 deletions trunk/arch/arm/kvm/mmio.c
@@ -0,0 +1,153 @@
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_emulate.h>
#include <trace/events/kvm.h>

#include "trace.h"

/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 * @vcpu: The VCPU pointer
 * @run: The VCPU run struct containing the mmio data
 *
 * This should only be called after returning from userspace for MMIO load
 * emulation.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        __u32 *dest;
        unsigned int len;
        int mask;

        if (!run->mmio.is_write) {
                dest = vcpu_reg(vcpu, vcpu->arch.mmio_decode.rt);
                memset(dest, 0, sizeof(int));

                len = run->mmio.len;
                if (len > 4)
                        return -EINVAL;

                memcpy(dest, run->mmio.data, len);

                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
                               *((u64 *)run->mmio.data));

                if (vcpu->arch.mmio_decode.sign_extend && len < 4) {
                        mask = 1U << ((len * 8) - 1);
                        *dest = (*dest ^ mask) - mask;
                }
        }

        return 0;
}

static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
                      struct kvm_exit_mmio *mmio)
{
        unsigned long rt, len;
        bool is_write, sign_extend;

        if ((vcpu->arch.hsr >> 8) & 1) {
                /* cache operation on I/O addr, tell guest unsupported */
                kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
                return 1;
        }

        if ((vcpu->arch.hsr >> 7) & 1) {
                /* page table accesses IO mem: tell guest to fix its TTBR */
                kvm_inject_dabt(vcpu, vcpu->arch.hxfar);
                return 1;
        }

        switch ((vcpu->arch.hsr >> 22) & 0x3) {
        case 0:
                len = 1;
                break;
        case 1:
                len = 2;
                break;
        case 2:
                len = 4;
                break;
        default:
                kvm_err("Hardware is weird: SAS 0b11 is reserved\n");
                return -EFAULT;
        }

        is_write = vcpu->arch.hsr & HSR_WNR;
        sign_extend = vcpu->arch.hsr & HSR_SSE;
        rt = (vcpu->arch.hsr & HSR_SRT_MASK) >> HSR_SRT_SHIFT;

        if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
                /* IO memory trying to read/write pc */
                kvm_inject_pabt(vcpu, vcpu->arch.hxfar);
                return 1;
        }

        mmio->is_write = is_write;
        mmio->phys_addr = fault_ipa;
        mmio->len = len;
        vcpu->arch.mmio_decode.sign_extend = sign_extend;
        vcpu->arch.mmio_decode.rt = rt;

        /*
         * The MMIO instruction is emulated and should not be re-executed
         * in the guest.
         */
        kvm_skip_instr(vcpu, (vcpu->arch.hsr >> 25) & 1);
        return 0;
}

int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
                 phys_addr_t fault_ipa)
{
        struct kvm_exit_mmio mmio;
        unsigned long rt;
        int ret;

        /*
         * Prepare MMIO operation. First stash it in a private
         * structure that we can use for in-kernel emulation. If the
         * kernel can't handle it, copy it into run->mmio and let user
         * space do its magic.
         */

        if (vcpu->arch.hsr & HSR_ISV) {
                ret = decode_hsr(vcpu, fault_ipa, &mmio);
                if (ret)
                        return ret;
        } else {
                kvm_err("load/store instruction decoding not implemented\n");
                return -ENOSYS;
        }

        rt = vcpu->arch.mmio_decode.rt;
        trace_kvm_mmio((mmio.is_write) ? KVM_TRACE_MMIO_WRITE :
                                         KVM_TRACE_MMIO_READ_UNSATISFIED,
                        mmio.len, fault_ipa,
                        (mmio.is_write) ? *vcpu_reg(vcpu, rt) : 0);

        if (mmio.is_write)
                memcpy(mmio.data, vcpu_reg(vcpu, rt), mmio.len);

        kvm_prepare_mmio(run, &mmio);
        return 0;
}
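Note: in kvm_handle_mmio_return() above, *dest = (*dest ^ mask) - mask is the standard XOR trick for sign-extending a value that was zero-extended into a wider register. A worked sketch for a one-byte signed load, not part of the commit, with made-up values:

/* Sketch only: sign-extend a zero-extended byte exactly as the code above does. */
u32 dest = 0xf0;                        /* byte 0xf0 read from MMIO, zero-extended */
int mask = 1U << ((1 * 8) - 1);         /* 0x80, the sign bit of a byte */
dest = (dest ^ mask) - mask;            /* 0x70 - 0x80 wraps to 0xfffffff0, i.e. -16 */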
7 changes: 5 additions & 2 deletions trunk/arch/arm/kvm/mmu.c
@@ -19,11 +19,13 @@
#include <linux/mman.h>
#include <linux/kvm_host.h>
#include <linux/io.h>
#include <trace/events/kvm.h>
#include <asm/idmap.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/mach/map.h>
@@ -624,8 +626,9 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
                        goto out_unlock;
                }

                kvm_pr_unimpl("I/O address abort...");
                ret = 0;
                /* Adjust page offset */
                fault_ipa |= vcpu->arch.hxfar & ~PAGE_MASK;
                ret = io_mem_abort(vcpu, run, fault_ipa);
                goto out_unlock;
        }

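Note: the fault IPA computed earlier in kvm_handle_guest_abort() is page aligned, while the faulting address recorded in HxFAR still carries the offset within the page; the new line merges the two before handing the access to io_mem_abort(). A quick worked sketch, not part of the commit, with made-up addresses and assuming 4K pages (so ~PAGE_MASK is 0xfff):

/* Sketch only: combine a page-aligned IPA with the in-page offset. */
phys_addr_t fault_ipa = 0x10010000;     /* page-aligned fault IPA (example) */
unsigned long hxfar = 0xc0010a04;       /* faulting address from HxFAR (example) */
fault_ipa |= hxfar & ~PAGE_MASK;        /* 0x10010000 | 0xa04 = 0x10010a04 */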
21 changes: 21 additions & 0 deletions trunk/arch/arm/kvm/trace.h
@@ -90,6 +90,27 @@ TRACE_EVENT(kvm_irq_line,
__entry->type, __entry->vcpu_idx, __entry->irq_num, __entry->level)
);

TRACE_EVENT(kvm_mmio_emulate,
        TP_PROTO(unsigned long vcpu_pc, unsigned long instr,
                 unsigned long cpsr),
        TP_ARGS(vcpu_pc, instr, cpsr),

        TP_STRUCT__entry(
                __field( unsigned long, vcpu_pc )
                __field( unsigned long, instr )
                __field( unsigned long, cpsr )
        ),

        TP_fast_assign(
                __entry->vcpu_pc = vcpu_pc;
                __entry->instr = instr;
                __entry->cpsr = cpsr;
        ),

        TP_printk("Emulate MMIO at: 0x%08lx (instr: %08lx, cpsr: %08lx)",
                  __entry->vcpu_pc, __entry->instr, __entry->cpsr)
);

/* Architecturally implementation defined CP15 register access */
TRACE_EVENT(kvm_emulate_cp15_imp,
TP_PROTO(unsigned long Op1, unsigned long Rt1, unsigned long CRn,
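Note: this diff only defines the kvm_mmio_emulate tracepoint; TRACE_EVENT() generates a trace_kvm_mmio_emulate() helper that a caller elsewhere in the MMIO/emulation path (not shown in this commit) would invoke, roughly as below. The pc, instr and cpsr variables are hypothetical:

/* Sketch only: firing the new tracepoint from an emulation path. */
trace_kvm_mmio_emulate(pc, instr, cpsr);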
