Skip to content

Commit

Permalink
[S390] Use gmap translation for accessing guest memory
Browse files Browse the repository at this point in the history
This patch removes kvm-s390 internal assumption of a linear mapping
of guest address space to user space. Previously, guest memory was
translated to user addresses using a fixed offset (gmsor). The new
code uses gmap_fault to resolve guest addresses.

Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
  • Loading branch information
Carsten Otte authored and Martin Schwidefsky committed Jul 24, 2011
1 parent 598841c commit 092670c
Show file tree
Hide file tree
Showing 6 changed files with 194 additions and 110 deletions.
4 changes: 1 addition & 3 deletions arch/s390/include/asm/kvm_host.h
Original file line number Diff line number Diff line change
Expand Up @@ -93,9 +93,7 @@ struct kvm_s390_sie_block {
__u32 scaol; /* 0x0064 */
__u8 reserved68[4]; /* 0x0068 */
__u32 todpr; /* 0x006c */
__u8 reserved70[16]; /* 0x0070 */
__u64 gmsor; /* 0x0080 */
__u64 gmslm; /* 0x0088 */
__u8 reserved70[32]; /* 0x0070 */
psw_t gpsw; /* 0x0090 */
__u64 gg14; /* 0x00a0 */
__u64 gg15; /* 0x00a8 */
Expand Down
243 changes: 175 additions & 68 deletions arch/s390/kvm/gaccess.h
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* gaccess.h - access guest memory
* access.h - access guest memory
*
* Copyright IBM Corp. 2008,2009
*
Expand All @@ -22,20 +22,13 @@ static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
unsigned long guestaddr)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->arch.sie_block->gmsor;
unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

if (guestaddr < 2 * PAGE_SIZE)
guestaddr += prefix;
else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
guestaddr -= prefix;

if (guestaddr > memsize)
return (void __user __force *) ERR_PTR(-EFAULT);

guestaddr += origin;

return (void __user *) guestaddr;
return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}

static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
Expand Down Expand Up @@ -141,11 +134,11 @@ static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,

static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
unsigned long guestdest,
const void *from, unsigned long n)
void *from, unsigned long n)
{
int rc;
unsigned long i;
const u8 *data = from;
u8 *data = from;

for (i = 0; i < n; i++) {
rc = put_guest_u8(vcpu, guestdest++, *(data++));
Expand All @@ -155,12 +148,95 @@ static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
return 0;
}

/*
 * __copy_to_guest_fast - copy kernel memory to guest address space
 * @vcpu:      the vcpu whose gmap is used for the translation
 * @guestdest: guest address to copy to
 * @from:      kernel source buffer
 * @n:         number of bytes to copy
 *
 * Guest addresses are translated to user addresses with gmap_fault()
 * per segment (PMD) sized chunk, since each segment may map to a
 * different user address.  The copy therefore proceeds one segment
 * at a time; a copy that fits entirely inside one segment needs just
 * a single translation and a single copy_to_user().
 *
 * Returns 0 on success, the error from gmap_fault() if a translation
 * fails, or -EFAULT if copy_to_user() faults.  On failure the
 * destination may have been partially written.
 */
static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	void __user *uptr;
	unsigned long chunk;

	/* reject wrap-around of the guest address range */
	if (guestdest + n < guestdest)
		return -EFAULT;

	while (n) {
		/* copy at most up to the end of the current segment */
		chunk = PMD_SIZE - (guestdest & ~PMD_MASK);
		if (chunk > n)
			chunk = n;

		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);
		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		if (copy_to_user(uptr, from, chunk))
			return -EFAULT;

		from += chunk;
		guestdest += chunk;
		n -= chunk;
	}
	return 0;
}

/*
 * copy_to_guest_absolute - copy to guest memory at an absolute address
 * @vcpu:      the vcpu whose gmap is used for the translation
 * @guestdest: absolute guest address to copy to
 * @from:      kernel source buffer
 * @n:         number of bytes to copy
 *
 * Unlike copy_to_guest(), the destination is taken as an absolute guest
 * address: no lowcore/prefix swapping is applied before the
 * gmap-translated copy.  Return value semantics are those of
 * __copy_to_guest_fast() (0 on success, negative error on failure).
 */
static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
unsigned long guestdest,
void *from, unsigned long n)
{
return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
const void *from, unsigned long n)
void *from, unsigned long n)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->arch.sie_block->gmsor;
unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);

if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
goto slowpath;
Expand All @@ -177,15 +253,7 @@ static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
guestdest -= prefix;

if (guestdest + n > memsize)
return -EFAULT;

if (guestdest + n < guestdest)
return -EFAULT;

guestdest += origin;

return copy_to_user((void __user *) guestdest, from, n);
return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
Expand All @@ -206,74 +274,113 @@ static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
return 0;
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc, unsigned long n)
static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc,
unsigned long n)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;
unsigned long origin = vcpu->arch.sie_block->gmsor;
unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
int r;
void __user *uptr;
unsigned long size;

if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
goto slowpath;
if (guestsrc + n < guestsrc)
return -EFAULT;

if ((guestsrc < prefix) && (guestsrc + n > prefix))
goto slowpath;
/* simple case: all within one segment table entry? */
if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

if ((guestsrc < prefix + 2 * PAGE_SIZE)
&& (guestsrc + n > prefix + 2 * PAGE_SIZE))
goto slowpath;
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);

if (guestsrc < 2 * PAGE_SIZE)
guestsrc += prefix;
else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
guestsrc -= prefix;
r = copy_from_user(to, uptr, n);

if (guestsrc + n > memsize)
return -EFAULT;
if (r)
r = -EFAULT;

if (guestsrc + n < guestsrc)
return -EFAULT;
goto out;
}

guestsrc += origin;
/* copy first segment */
uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

return copy_from_user(to, (void __user *) guestsrc, n);
slowpath:
return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);

static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
unsigned long guestdest,
const void *from, unsigned long n)
{
unsigned long origin = vcpu->arch.sie_block->gmsor;
unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
size = PMD_SIZE - (guestsrc & ~PMD_MASK);

if (guestdest + n > memsize)
return -EFAULT;
r = copy_from_user(to, uptr, size);

if (guestdest + n < guestdest)
return -EFAULT;
if (r) {
r = -EFAULT;
goto out;
}
to += size;
n -= size;
guestsrc += size;

/* copy full segments */
while (n >= PMD_SIZE) {
uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);

r = copy_from_user(to, uptr, PMD_SIZE);

if (r) {
r = -EFAULT;
goto out;
}
to += PMD_SIZE;
n -= PMD_SIZE;
guestsrc += PMD_SIZE;
}

/* copy the tail segment */
if (n) {
uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

guestdest += origin;
if (IS_ERR((void __force *) uptr))
return PTR_ERR((void __force *) uptr);

return copy_to_user((void __user *) guestdest, from, n);
r = copy_from_user(to, uptr, n);

if (r)
r = -EFAULT;
}
out:
return r;
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc,
unsigned long n)
{
unsigned long origin = vcpu->arch.sie_block->gmsor;
unsigned long memsize = kvm_s390_vcpu_get_memsize(vcpu);
return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}

if (guestsrc + n > memsize)
return -EFAULT;
static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
unsigned long guestsrc, unsigned long n)
{
unsigned long prefix = vcpu->arch.sie_block->prefix;

if (guestsrc + n < guestsrc)
return -EFAULT;
if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
goto slowpath;

guestsrc += origin;
if ((guestsrc < prefix) && (guestsrc + n > prefix))
goto slowpath;

if ((guestsrc < prefix + 2 * PAGE_SIZE)
&& (guestsrc + n > prefix + 2 * PAGE_SIZE))
goto slowpath;

if (guestsrc < 2 * PAGE_SIZE)
guestsrc += prefix;
else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
guestsrc -= prefix;

return copy_from_user(to, (void __user *) guestsrc, n);
return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif
24 changes: 14 additions & 10 deletions arch/s390/kvm/intercept.c
Original file line number Diff line number Diff line change
Expand Up @@ -165,26 +165,30 @@ static int handle_validity(struct kvm_vcpu *vcpu)
int rc;

vcpu->stat.exit_validity++;
if ((viwhy == 0x37) && (vcpu->arch.sie_block->prefix
<= kvm_s390_vcpu_get_memsize(vcpu) - 2*PAGE_SIZE)) {
rc = fault_in_pages_writeable((char __user *)
vcpu->arch.sie_block->gmsor +
vcpu->arch.sie_block->prefix,
2*PAGE_SIZE);
if (viwhy == 0x37) {
vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
vcpu->arch.gmap);
if (IS_ERR_VALUE(vmaddr)) {
rc = -EOPNOTSUPP;
goto out;
}
rc = fault_in_pages_writeable((char __user *) vmaddr,
PAGE_SIZE);
if (rc) {
/* user will receive sigsegv, exit to user */
rc = -EOPNOTSUPP;
goto out;
}
vmaddr = gmap_fault(vcpu->arch.sie_block->prefix,
vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
vcpu->arch.gmap);
if (IS_ERR_VALUE(vmaddr)) {
rc = -EOPNOTSUPP;
goto out;
}
vmaddr = gmap_fault(vcpu->arch.sie_block->prefix + PAGE_SIZE,
vcpu->arch.gmap);
if (IS_ERR_VALUE(vmaddr)) {
rc = fault_in_pages_writeable((char __user *) vmaddr,
PAGE_SIZE);
if (rc) {
/* user will receive sigsegv, exit to user */
rc = -EOPNOTSUPP;
goto out;
}
Expand Down
4 changes: 2 additions & 2 deletions arch/s390/kvm/kvm-s390.c
Original file line number Diff line number Diff line change
Expand Up @@ -549,7 +549,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
unsigned long n, int prefix)
{
if (prefix)
Expand All @@ -566,7 +566,7 @@ static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
*/
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
const unsigned char archmode = 1;
unsigned char archmode = 1;
int prefix;

if (addr == KVM_S390_STORE_STATUS_NOADDR) {
Expand Down
Loading

0 comments on commit 092670c

Please sign in to comment.