KVM: s390: arch backend for the kvm kernel module
This patch contains the port of Qumranet's kvm kernel module to the IBM zSeries (aka s390x, mainframe) architecture. It uses the mainframe's virtualization instruction SIE to run virtual machines with up to 64 virtual CPUs each. This port is only usable on 64-bit host kernels and can only run 64-bit guest kernels; however, running 31-bit applications in guest userspace is possible.

The following source files are introduced by this patch:

arch/s390/kvm/kvm-s390.c: similar to arch/x86/kvm/x86.c, this implements all arch callbacks for kvm; __vcpu_run calls back into sie64a to enter the guest machine context
arch/s390/kvm/sie64a.S: assembler function sie64a, which enters guest context via SIE and switches world before and after that
include/asm-s390/kvm_host.h: contains all vital data structures needed to run virtual machines on the mainframe
include/asm-s390/kvm.h: defines kvm_regs and friends for user access to guest register content
arch/s390/kvm/gaccess.h: functions similar to uaccess to access guest memory
arch/s390/kvm/kvm-s390.h: header file for kvm-s390 internals, extended by later patches

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
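As a rough illustration of how callers elsewhere in KVM could consume the uaccess-style helpers introduced below in arch/s390/kvm/gaccess.h, here is a hedged sketch; the function read_and_mirror_word and its use are hypothetical and not part of this commit.

/*
 * Hypothetical caller of the gaccess.h helpers; assumes "gaccess.h" is
 * included and that both guest addresses are 8-byte aligned, since
 * get_guest_u64()/put_guest_u64() BUG_ON() misaligned addresses.
 */
static int read_and_mirror_word(struct kvm_vcpu *vcpu, u64 src, u64 dst)
{
	u64 value;
	int rc;

	/* Translate the guest address and fetch 8 bytes via get_user(). */
	rc = get_guest_u64(vcpu, src, &value);
	if (rc)
		return rc;	/* typically -EFAULT for an address outside guest memory */

	/* Write the same value to another guest location via put_user(). */
	return put_guest_u64(vcpu, dst, value);
}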
Authored by Heiko Carstens and committed by Avi Kivity on Apr 27, 2008 (commit b0c632d, 1 parent: 8a88ac6)
Showing 12 changed files with 1,145 additions and 2 deletions.
@@ -0,0 +1,14 @@
# Makefile for kernel virtual machines on s390
#
# Copyright IBM Corp. 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (version 2 only)
# as published by the Free Software Foundation.

common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o)

EXTRA_CFLAGS += -Ivirt/kvm -Iarch/s390/kvm

kvm-objs := $(common-objs) kvm-s390.o sie64a.o
obj-$(CONFIG_KVM) += kvm.o
@@ -0,0 +1,274 @@
/*
 * gaccess.h - access guest memory
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>

static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       u64 guestaddr)
{
	u64 prefix = vcpu->arch.sie_block->prefix;
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	if (guestaddr > memsize)
		return (void __user __force *) ERR_PTR(-EFAULT);

	guestaddr += origin;

	return (void __user *) guestaddr;
}

static inline int get_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u64 __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR(uptr))
		return PTR_ERR(uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, u64 guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, u64 guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, u64 guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, u64 guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}


static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu, u64 guestdest,
				       const void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	const u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, u64 guestdest,
				const void *from, unsigned long n)
{
	u64 prefix = vcpu->arch.sie_block->prefix;
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}

static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 u64 guestsrc, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  u64 guestsrc, unsigned long n)
{
	u64 prefix = vcpu->arch.sie_block->prefix;
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}

static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu, u64 guestdest,
					 const void *from, unsigned long n)
{
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestdest + n > memsize)
		return -EFAULT;

	if (guestdest + n < guestdest)
		return -EFAULT;

	guestdest += origin;

	return copy_to_user((void __user *) guestdest, from, n);
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   u64 guestsrc, unsigned long n)
{
	u64 origin = vcpu->kvm->arch.guest_origin;
	u64 memsize = vcpu->kvm->arch.guest_memsize;

	if (guestsrc + n > memsize)
		return -EFAULT;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	guestsrc += origin;

	return copy_from_user(to, (void __user *) guestsrc, n);
}
#endif
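For context (not part of the patch): on s390 the first two pages of guest real storage, the lowcore, are relocated by the per-CPU prefix register, which is why __guestaddr_to_user() above swaps addresses below 2 * PAGE_SIZE with the prefix area before adding guest_origin. A minimal standalone sketch of just that swap rule, assuming the usual 4 KB s390 page size:

#include <stdint.h>

#define PAGE_SIZE 4096ULL	/* s390 uses 4 KB pages */

/*
 * Standalone model of the prefix handling in __guestaddr_to_user():
 * lowcore addresses (first two pages) and the prefix area trade places,
 * everything else passes through unchanged. Illustrative only.
 */
static uint64_t apply_prefix(uint64_t guestaddr, uint64_t prefix)
{
	if (guestaddr < 2 * PAGE_SIZE)
		return guestaddr + prefix;	/* lowcore -> prefix area */
	if (guestaddr >= prefix && guestaddr < prefix + 2 * PAGE_SIZE)
		return guestaddr - prefix;	/* prefix area -> lowcore */
	return guestaddr;			/* all other addresses unchanged */
}

For example, with prefix = 0x10000, guest address 0x0 maps to offset 0x10000 within guest memory, and guest address 0x10000 maps back to offset 0x0.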