Commit
---
yaml
---
r: 146589
b: refs/heads/master
c: 2668dab
h: refs/heads/master
i:
  146587: b610242
v: v3
Carsten Otte authored and Avi Kivity committed Jun 10, 2009
1 parent 0917060 commit 2f25bfa
Showing 2 changed files with 32 additions and 6 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 58f8ac279a8c46eb0a3193edd521ed2e41c4f914
+refs/heads/master: 2668dab794272f0898491acaf1e77e9a005abc0f
36 changes: 31 additions & 5 deletions trunk/arch/s390/kvm/kvm-s390.c
@@ -657,14 +657,16 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 				struct kvm_memory_slot old,
 				int user_alloc)
 {
+	int i;
+
 	/* A few sanity checks. We can have exactly one memory slot which has
 	   to start at guest virtual zero and which has to be located at a
 	   page boundary in userland and which has to end at a page boundary.
 	   The memory in userland is ok to be fragmented into various different
 	   vmas. It is okay to mmap() and munmap() stuff in this slot after
 	   doing this call at any time */
 
-	if (mem->slot)
+	if (mem->slot || kvm->arch.guest_memsize)
 		return -EINVAL;
 
 	if (mem->guest_phys_addr)
@@ -676,15 +678,39 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 	if (mem->memory_size & (PAGE_SIZE - 1))
 		return -EINVAL;
 
+	if (!user_alloc)
+		return -EINVAL;
+
+	/* lock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (!kvm->vcpus[i])
+			continue;
+		if (!mutex_trylock(&kvm->vcpus[i]->mutex))
+			goto fail_out;
+	}
+
 	kvm->arch.guest_origin = mem->userspace_addr;
 	kvm->arch.guest_memsize = mem->memory_size;
 
-	/* FIXME: we do want to interrupt running CPUs and update their memory
-	   configuration now to avoid race conditions. But hey, changing the
-	   memory layout while virtual CPUs are running is usually bad
-	   programming practice. */
+	/* update sie control blocks, and unlock all vcpus */
+	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
+		if (kvm->vcpus[i]) {
+			kvm->vcpus[i]->arch.sie_block->gmsor =
+				kvm->arch.guest_origin;
+			kvm->vcpus[i]->arch.sie_block->gmslm =
+				kvm->arch.guest_memsize +
+				kvm->arch.guest_origin +
+				VIRTIODESCSPACE - 1ul;
+			mutex_unlock(&kvm->vcpus[i]->mutex);
+		}
+	}
 
 	return 0;
+
+fail_out:
+	for (; i >= 0; i--)
+		mutex_unlock(&kvm->vcpus[i]->mutex);
+	return -EINVAL;
 }
 
 void kvm_arch_flush_shadow(struct kvm *kvm)
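The core of the patch is a lock-all-vcpus-or-bail pattern around the memory-layout update: every vcpu mutex is taken with mutex_trylock(), the sie control block bounds (gmsor/gmslm) are rewritten only while all vcpus are held, and a failed trylock unwinds and returns -EINVAL. The sketch below models that pattern as standalone userspace C with pthreads; struct vcpu, MAX_VCPUS, set_guest_memory() and the VIRTIODESCSPACE value are illustrative stand-ins, not the kernel's definitions, and the rollback here releases only the locks this function itself took.

#include <pthread.h>
#include <stdio.h>

#define MAX_VCPUS       4
#define PAGE_SIZE       4096ul
#define VIRTIODESCSPACE (3ul * PAGE_SIZE)  /* stand-in value, not the kernel's definition */

struct vcpu {
	int present;
	pthread_mutex_t mutex;
	unsigned long gmsor;  /* guest memory start (origin) */
	unsigned long gmslm;  /* guest memory limit (last valid address) */
};

static struct vcpu vcpus[MAX_VCPUS];

/* Publish a new guest memory layout only while every present vcpu is held. */
static int set_guest_memory(unsigned long origin, unsigned long size)
{
	int i;

	/* lock all vcpus, bail out if any of them is currently busy */
	for (i = 0; i < MAX_VCPUS; ++i) {
		if (!vcpus[i].present)
			continue;
		if (pthread_mutex_trylock(&vcpus[i].mutex) != 0)
			goto fail_out;
	}

	/* update per-vcpu bounds, then unlock */
	for (i = 0; i < MAX_VCPUS; ++i) {
		if (vcpus[i].present) {
			vcpus[i].gmsor = origin;
			vcpus[i].gmslm = origin + size + VIRTIODESCSPACE - 1ul;
			pthread_mutex_unlock(&vcpus[i].mutex);
		}
	}
	return 0;

fail_out:
	/* roll back: unlock only the vcpus this function managed to lock */
	for (i = i - 1; i >= 0; i--)
		if (vcpus[i].present)
			pthread_mutex_unlock(&vcpus[i].mutex);
	return -1;
}

int main(void)
{
	for (int i = 0; i < 2; ++i) {
		vcpus[i].present = 1;
		pthread_mutex_init(&vcpus[i].mutex, NULL);
	}
	if (set_guest_memory(0x80000000ul, 64ul * 1024 * 1024) == 0)
		printf("vcpu0 limit: 0x%lx\n", vcpus[0].gmslm);
	return 0;
}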

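On the userspace side, the sanity checks in the hunk above correspond to how a VMM registers guest memory with KVM_SET_USER_MEMORY_REGION: exactly one slot (slot 0), guest_phys_addr of zero, and a page-aligned, user-allocated backing mapping. The following is a rough sketch of that call sequence using the generic KVM ioctl API, with error handling trimmed; whether this exact flow is sufficient on s390 hosts of that era is an assumption.

#include <fcntl.h>
#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	int vm = ioctl(kvm, KVM_CREATE_VM, 0);

	/* page-aligned, anonymous backing memory for the guest (64 MiB here) */
	size_t size = 64ul * 1024 * 1024;
	void *mem = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	struct kvm_userspace_memory_region region;
	memset(&region, 0, sizeof(region));
	region.slot = 0;                            /* only one slot is allowed */
	region.guest_phys_addr = 0;                 /* must start at guest address zero */
	region.memory_size = size;                  /* must be page-aligned */
	region.userspace_addr = (unsigned long)mem; /* page-aligned (mmap guarantees this) */

	ioctl(vm, KVM_SET_USER_MEMORY_REGION, &region);

	munmap(mem, size);
	close(vm);
	close(kvm);
	return 0;
}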