
Commit c4bc354

---
r: 103725
b: refs/heads/master
c: 5f94c17
h: refs/heads/master
i:
  103723: 9edba6d
v: v3
Laurent Vivier authored and Avi Kivity committed Jul 20, 2008
1 parent b20b039 commit c4bc354
Showing 6 changed files with 270 additions and 1 deletion.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
---
refs/heads/master: 92760499d01ef91518119908eb9b8798b6c9bd3f
refs/heads/master: 5f94c1741bdc7a336553122036e8a779e616ccbf
29 changes: 29 additions & 0 deletions trunk/include/linux/kvm.h
@@ -173,6 +173,30 @@ struct kvm_run {
};
};

/* for KVM_REGISTER_COALESCED_MMIO / KVM_UNREGISTER_COALESCED_MMIO */

struct kvm_coalesced_mmio_zone {
__u64 addr;
__u32 size;
__u32 pad;
};

struct kvm_coalesced_mmio {
__u64 phys_addr;
__u32 len;
__u32 pad;
__u8 data[8];
};

struct kvm_coalesced_mmio_ring {
__u32 first, last;
struct kvm_coalesced_mmio coalesced_mmio[0];
};

#define KVM_COALESCED_MMIO_MAX \
((PAGE_SIZE - sizeof(struct kvm_coalesced_mmio_ring)) / \
sizeof(struct kvm_coalesced_mmio))

/* for KVM_TRANSLATE */
struct kvm_translation {
/* in */
@@ -346,6 +370,7 @@ struct kvm_trace_rec {
#define KVM_CAP_NOP_IO_DELAY 12
#define KVM_CAP_PV_MMU 13
#define KVM_CAP_MP_STATE 14
#define KVM_CAP_COALESCED_MMIO 15

/*
* ioctls for VM fds
@@ -371,6 +396,10 @@ struct kvm_trace_rec {
#define KVM_CREATE_PIT _IO(KVMIO, 0x64)
#define KVM_GET_PIT _IOWR(KVMIO, 0x65, struct kvm_pit_state)
#define KVM_SET_PIT _IOR(KVMIO, 0x66, struct kvm_pit_state)
#define KVM_REGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x67, struct kvm_coalesced_mmio_zone)
#define KVM_UNREGISTER_COALESCED_MMIO \
_IOW(KVMIO, 0x68, struct kvm_coalesced_mmio_zone)

/*
* ioctls for vcpu fds
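
The two new ioctls operate on the VM file descriptor and take a struct kvm_coalesced_mmio_zone. Below is a minimal userspace sketch (illustrative only, not part of this commit) of registering a batchable range; register_coalesced_zone and vm_fd are hypothetical names, and availability should first be probed with KVM_CHECK_EXTENSION against KVM_CAP_COALESCED_MMIO.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: ask KVM to coalesce guest MMIO writes to [addr, addr + size). */
static int register_coalesced_zone(int vm_fd, __u64 addr, __u32 size)
{
        struct kvm_coalesced_mmio_zone zone;

        memset(&zone, 0, sizeof(zone));
        zone.addr = addr;       /* guest-physical base of the batchable range */
        zone.size = size;       /* length of the range in bytes */

        return ioctl(vm_fd, KVM_REGISTER_COALESCED_MMIO, &zone);
}

KVM_UNREGISTER_COALESCED_MMIO takes the same structure to drop a previously registered zone.
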
4 changes: 4 additions & 0 deletions trunk/include/linux/kvm_host.h
@@ -117,6 +117,10 @@ struct kvm {
struct kvm_vm_stat stat;
struct kvm_arch arch;
atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct kvm_coalesced_mmio_dev *coalesced_mmio_dev;
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
#endif
};

/* The guest did something we don't support. */
156 changes: 156 additions & 0 deletions trunk/virt/kvm/coalesced_mmio.c
@@ -0,0 +1,156 @@
/*
* KVM coalesced MMIO
*
* Copyright (c) 2008 Bull S.A.S.
*
* Author: Laurent Vivier <Laurent.Vivier@bull.net>
*
*/

#include "iodev.h"

#include <linux/kvm_host.h>
#include <linux/kvm.h>

#include "coalesced_mmio.h"

static int coalesced_mmio_in_range(struct kvm_io_device *this,
gpa_t addr, int len, int is_write)
{
struct kvm_coalesced_mmio_dev *dev =
(struct kvm_coalesced_mmio_dev*)this->private;
struct kvm_coalesced_mmio_zone *zone;
int next;
int i;

if (!is_write)
return 0;

/* kvm->lock is taken by the caller and must be not released before
* dev.read/write
*/

/* Are we able to batch it ? */

/* last is the first free entry
* check if we don't meet the first used entry
* there is always one unused entry in the buffer
*/

next = (dev->kvm->coalesced_mmio_ring->last + 1) %
KVM_COALESCED_MMIO_MAX;
if (next == dev->kvm->coalesced_mmio_ring->first) {
/* full */
return 0;
}

/* is it in a batchable area ? */

for (i = 0; i < dev->nb_zones; i++) {
zone = &dev->zone[i];

/* (addr,len) is fully included in
* (zone->addr, zone->size)
*/

if (zone->addr <= addr &&
addr + len <= zone->addr + zone->size)
return 1;
}
return 0;
}

static void coalesced_mmio_write(struct kvm_io_device *this,
gpa_t addr, int len, const void *val)
{
struct kvm_coalesced_mmio_dev *dev =
(struct kvm_coalesced_mmio_dev*)this->private;
struct kvm_coalesced_mmio_ring *ring = dev->kvm->coalesced_mmio_ring;

/* kvm->lock must be taken by caller before call to in_range()*/

/* copy data in first free entry of the ring */

ring->coalesced_mmio[ring->last].phys_addr = addr;
ring->coalesced_mmio[ring->last].len = len;
memcpy(ring->coalesced_mmio[ring->last].data, val, len);
smp_wmb();
ring->last = (ring->last + 1) % KVM_COALESCED_MMIO_MAX;
}

static void coalesced_mmio_destructor(struct kvm_io_device *this)
{
kfree(this);
}

int kvm_coalesced_mmio_init(struct kvm *kvm)
{
struct kvm_coalesced_mmio_dev *dev;

dev = kzalloc(sizeof(struct kvm_coalesced_mmio_dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
dev->dev.write = coalesced_mmio_write;
dev->dev.in_range = coalesced_mmio_in_range;
dev->dev.destructor = coalesced_mmio_destructor;
dev->dev.private = dev;
dev->kvm = kvm;
kvm->coalesced_mmio_dev = dev;
kvm_io_bus_register_dev(&kvm->mmio_bus, &dev->dev);

return 0;
}

int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;

if (dev == NULL)
return -EINVAL;

mutex_lock(&kvm->lock);
if (dev->nb_zones >= KVM_COALESCED_MMIO_ZONE_MAX) {
mutex_unlock(&kvm->lock);
return -ENOBUFS;
}

dev->zone[dev->nb_zones] = *zone;
dev->nb_zones++;

mutex_unlock(&kvm->lock);
return 0;
}

int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone)
{
int i;
struct kvm_coalesced_mmio_dev *dev = kvm->coalesced_mmio_dev;
struct kvm_coalesced_mmio_zone *z;

if (dev == NULL)
return -EINVAL;

mutex_lock(&kvm->lock);

i = dev->nb_zones;
while(i) {
z = &dev->zone[i - 1];

/* unregister all zones
* included in (zone->addr, zone->size)
*/

if (zone->addr <= z->addr &&
z->addr + z->size <= zone->addr + zone->size) {
dev->nb_zones--;
*z = dev->zone[dev->nb_zones];
}
i--;
}

mutex_unlock(&kvm->lock);

return 0;
}
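
For context, here is a sketch of the consumer side that the producer above assumes (hypothetical, not part of this commit): the kernel owns 'last', userspace owns 'first', and because one slot is always left unused, first == last means the ring is empty. handle_mmio_write is a placeholder for the caller's device emulation, and KVM_COALESCED_MMIO_MAX is assumed to be computed with the same page size as in the kernel header.

#include <linux/kvm.h>

/* Hypothetical hook into the caller's device emulation. */
extern void handle_mmio_write(__u64 phys_addr, const __u8 *data, __u32 len);

/* Drain the ring that coalesced_mmio_write() above fills. */
static void drain_coalesced_ring(struct kvm_coalesced_mmio_ring *ring)
{
        while (ring->first != ring->last) {
                struct kvm_coalesced_mmio *ent =
                        &ring->coalesced_mmio[ring->first];

                /* Replay the buffered guest write. */
                handle_mmio_write(ent->phys_addr, ent->data, ent->len);

                /* Finish reading the entry before releasing the slot. */
                __sync_synchronize();
                ring->first = (ring->first + 1) % KVM_COALESCED_MMIO_MAX;
        }
}
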
23 changes: 23 additions & 0 deletions trunk/virt/kvm/coalesced_mmio.h
@@ -0,0 +1,23 @@
/*
* KVM coalesced MMIO
*
* Copyright (c) 2008 Bull S.A.S.
*
* Author: Laurent Vivier <Laurent.Vivier@bull.net>
*
*/

#define KVM_COALESCED_MMIO_ZONE_MAX 100

struct kvm_coalesced_mmio_dev {
struct kvm_io_device dev;
struct kvm *kvm;
int nb_zones;
struct kvm_coalesced_mmio_zone zone[KVM_COALESCED_MMIO_ZONE_MAX];
};

int kvm_coalesced_mmio_init(struct kvm *kvm);
int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,
struct kvm_coalesced_mmio_zone *zone);
57 changes: 57 additions & 0 deletions trunk/virt/kvm/kvm_main.c
@@ -47,6 +47,10 @@
#include <asm/uaccess.h>
#include <asm/pgtable.h>

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
#include "coalesced_mmio.h"
#endif

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

@@ -185,10 +189,23 @@ EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
static struct kvm *kvm_create_vm(void)
{
struct kvm *kvm = kvm_arch_create_vm();
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
struct page *page;
#endif

if (IS_ERR(kvm))
goto out;

#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
page = alloc_page(GFP_KERNEL | __GFP_ZERO);
if (!page) {
kfree(kvm);
return ERR_PTR(-ENOMEM);
}
kvm->coalesced_mmio_ring =
(struct kvm_coalesced_mmio_ring *)page_address(page);
#endif

kvm->mm = current->mm;
atomic_inc(&kvm->mm->mm_count);
spin_lock_init(&kvm->mmu_lock);
@@ -200,6 +217,9 @@ static struct kvm *kvm_create_vm(void)
spin_lock(&kvm_lock);
list_add(&kvm->vm_list, &vm_list);
spin_unlock(&kvm_lock);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
kvm_coalesced_mmio_init(kvm);
#endif
out:
return kvm;
}
@@ -242,6 +262,10 @@ static void kvm_destroy_vm(struct kvm *kvm)
spin_unlock(&kvm_lock);
kvm_io_bus_destroy(&kvm->pio_bus);
kvm_io_bus_destroy(&kvm->mmio_bus);
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
if (kvm->coalesced_mmio_ring != NULL)
free_page((unsigned long)kvm->coalesced_mmio_ring);
#endif
kvm_arch_destroy_vm(kvm);
mmdrop(mm);
}
@@ -825,6 +849,10 @@ static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
#ifdef CONFIG_X86
else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
page = virt_to_page(vcpu->arch.pio_data);
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
#endif
else
return VM_FAULT_SIGBUS;
@@ -1148,6 +1176,32 @@ static long kvm_vm_ioctl(struct file *filp,
goto out;
break;
}
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
case KVM_REGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
r = -ENXIO;
r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
if (r)
goto out;
r = 0;
break;
}
case KVM_UNREGISTER_COALESCED_MMIO: {
struct kvm_coalesced_mmio_zone zone;
r = -EFAULT;
if (copy_from_user(&zone, argp, sizeof zone))
goto out;
r = -ENXIO;
r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
if (r)
goto out;
r = 0;
break;
}
#endif
default:
r = kvm_arch_vm_ioctl(filp, ioctl, arg);
}
@@ -1231,6 +1285,9 @@ static long kvm_dev_ioctl(struct file *filp,
r = PAGE_SIZE; /* struct kvm_run */
#ifdef CONFIG_X86
r += PAGE_SIZE; /* pio data page */
#endif
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
r += PAGE_SIZE; /* coalesced mmio ring page */
#endif
break;
case KVM_TRACE_ENABLE:
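
To reach the ring from userspace, the page is exposed through the vcpu mmap area handled by kvm_vcpu_fault() above, and KVM_GET_VCPU_MMAP_SIZE now accounts for the extra page. A mapping sketch follows (illustrative only; KVM_COALESCED_MMIO_PAGE_OFFSET comes from the architecture-specific headers rather than this common part, and map_coalesced_ring is a hypothetical name).

#include <stddef.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Hypothetical helper: map the vcpu area and locate the coalesced MMIO ring page. */
static struct kvm_coalesced_mmio_ring *
map_coalesced_ring(int kvm_fd, int vcpu_fd)
{
        long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
        char *run;

        if (mmap_size <= 0)
                return NULL;

        run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                   MAP_SHARED, vcpu_fd, 0);
        if (run == MAP_FAILED)
                return NULL;

        return (struct kvm_coalesced_mmio_ring *)
               (run + KVM_COALESCED_MMIO_PAGE_OFFSET * getpagesize());
}
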
