Commit

---
r: 282011
b: refs/heads/master
c: 95d4c16
h: refs/heads/master
i:
  282009: 039ea78
  282007: f2056bf
v: v3
Takuya Yoshikawa authored and Avi Kivity committed Dec 27, 2011
1 parent 58dc35b · commit 31b8bf5
Showing 4 changed files with 64 additions and 12 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 7850ac5420803996e2960d15b924021f28e0dffc
+refs/heads/master: 95d4c16ce78cb6b7549a09159c409d52ddd18dae

2 changes: 2 additions & 0 deletions trunk/arch/x86/include/asm/kvm_host.h
@@ -648,6 +648,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 
 int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
 void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+			       struct kvm_memory_slot *slot);
 void kvm_mmu_zap_all(struct kvm *kvm);
 unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);

14 changes: 10 additions & 4 deletions trunk/arch/x86/kvm/mmu.c
@@ -1023,15 +1023,13 @@ static void drop_spte(struct kvm *kvm, u64 *sptep)
 		rmap_remove(kvm, sptep);
 }
 
-static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
+			       struct kvm_memory_slot *slot)
 {
-	struct kvm_memory_slot *slot;
 	unsigned long *rmapp;
 	u64 *spte;
 	int i, write_protected = 0;
 
-	slot = gfn_to_memslot(kvm, gfn);
-
 	rmapp = __gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL, slot);
 	spte = rmap_next(kvm, rmapp, NULL);
 	while (spte) {
@@ -1066,6 +1064,14 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 	return write_protected;
 }
 
+static int rmap_write_protect(struct kvm *kvm, u64 gfn)
+{
+	struct kvm_memory_slot *slot;
+
+	slot = gfn_to_memslot(kvm, gfn);
+	return kvm_mmu_rmap_write_protect(kvm, gfn, slot);
+}
+
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
 			   unsigned long data)
 {
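
The mmu.c change splits the old rmap_write_protect() into a core that takes the memslot explicitly, kvm_mmu_rmap_write_protect(), plus a thin static wrapper that preserves the old interface by doing the gfn_to_memslot() lookup itself. The payoff is that a caller which already holds the slot, as the new dirty-logging path does, avoids one slot lookup per gfn. Below is a minimal stand-alone C sketch of the same pattern; every name, type, and lookup here is an illustrative stand-in, not kernel API.

/* refactor_sketch.c - stand-alone illustration, not kernel code. */
#include <stdio.h>
#include <stddef.h>

struct memslot {
	unsigned long base_gfn;
	unsigned long npages;
};

static struct memslot slots[] = {
	{ .base_gfn = 0x000, .npages = 512 },
	{ .base_gfn = 0x200, .npages = 512 },
};

/* Stand-in for gfn_to_memslot(): linear search over the slot array. */
static struct memslot *gfn_to_slot(unsigned long gfn)
{
	for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return &slots[i];
	return NULL;
}

/* Core takes the slot explicitly, like kvm_mmu_rmap_write_protect(). */
static int protect_gfn(unsigned long gfn, struct memslot *slot)
{
	printf("protect gfn %#lx (slot base %#lx)\n", gfn, slot->base_gfn);
	return 1;
}

/* Thin wrapper keeps the old one-argument interface alive. */
static int protect(unsigned long gfn)
{
	return protect_gfn(gfn, gfn_to_slot(gfn));
}

int main(void)
{
	struct memslot *slot = gfn_to_slot(0x210);
	unsigned long gfn;

	/* A caller that already holds the slot skips the repeated lookup. */
	for (gfn = 0x210; gfn < 0x214; gfn++)
		protect_gfn(gfn, slot);

	return protect(0x005) == 1 ? 0 : 1;
}

The wrapper keeps existing call sites unchanged while a hot loop can pass the cached slot straight to the core.
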
58 changes: 51 additions & 7 deletions trunk/arch/x86/kvm/x86.c
@@ -3460,6 +3460,50 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
 	return 0;
 }
 
+/**
+ * write_protect_slot - write protect a slot for dirty logging
+ * @kvm: the kvm instance
+ * @memslot: the slot we protect
+ * @dirty_bitmap: the bitmap indicating which pages are dirty
+ * @nr_dirty_pages: the number of dirty pages
+ *
+ * We have two ways to find all sptes to protect:
+ * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and
+ *    checks ones that have a spte mapping a page in the slot.
+ * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
+ *
+ * Generally speaking, if there are not so many dirty pages compared to the
+ * number of shadow pages, we should use the latter.
+ *
+ * Note that letting others write into a page marked dirty in the old bitmap
+ * by using the remaining tlb entry is not a problem. That page will become
+ * write protected again when we flush the tlb and then be reported dirty to
+ * the user space by copying the old bitmap.
+ */
+static void write_protect_slot(struct kvm *kvm,
+			       struct kvm_memory_slot *memslot,
+			       unsigned long *dirty_bitmap,
+			       unsigned long nr_dirty_pages)
+{
+	/* Not many dirty pages compared to # of shadow pages. */
+	if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
+		unsigned long gfn_offset;
+
+		for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
+			unsigned long gfn = memslot->base_gfn + gfn_offset;
+
+			spin_lock(&kvm->mmu_lock);
+			kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
+			spin_unlock(&kvm->mmu_lock);
+		}
+		kvm_flush_remote_tlbs(kvm);
+	} else {
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, memslot->id);
+		spin_unlock(&kvm->mmu_lock);
+	}
+}
+
 /*
  * Get (and clear) the dirty memory log for a memory slot.
  */
@@ -3468,7 +3512,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 {
 	int r;
 	struct kvm_memory_slot *memslot;
-	unsigned long n;
+	unsigned long n, nr_dirty_pages;
 
 	mutex_lock(&kvm->slots_lock);
 
@@ -3482,9 +3526,10 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		goto out;
 
 	n = kvm_dirty_bitmap_bytes(memslot);
+	nr_dirty_pages = memslot->nr_dirty_pages;
 
 	/* If nothing is dirty, don't bother messing with page tables. */
-	if (memslot->nr_dirty_pages) {
+	if (nr_dirty_pages) {
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
@@ -3498,8 +3543,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		if (!slots)
 			goto out;
 		memcpy(slots, kvm->memslots, sizeof(struct kvm_memslots));
-		slots->memslots[log->slot].dirty_bitmap = dirty_bitmap;
-		slots->memslots[log->slot].nr_dirty_pages = 0;
+		memslot = &slots->memslots[log->slot];
+		memslot->dirty_bitmap = dirty_bitmap;
+		memslot->nr_dirty_pages = 0;
 		slots->generation++;
 
 		old_slots = kvm->memslots;
@@ -3508,9 +3554,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
+		write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages);
 
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n))
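
write_protect_slot() chooses between the two strategies its comment block describes: when nr_dirty_pages is below kvm->arch.n_used_mmu_pages it protects only the gfns set in the dirty bitmap, otherwise it sweeps the whole slot once. A rough stand-alone C sketch of that decision follows; the plain bit loop stands in for the kernel's for_each_set_bit(), printf() stands in for the real MMU work, and all names are illustrative.

/* heuristic_sketch.c - stand-alone illustration, not kernel code. */
#include <stdio.h>

/* Width of one unsigned long in bits, used as the slot/bitmap size. */
#define SLOT_NPAGES (8UL * sizeof(unsigned long))

static void protect_gfn(unsigned long gfn)
{
	printf("rmap write-protect gfn %#lx\n", gfn);
}

static void sweep_slot(void)
{
	printf("walk every shadow page that maps the slot\n");
}

static void write_protect_slot(unsigned long base_gfn,
			       unsigned long dirty_bitmap,
			       unsigned long nr_dirty_pages,
			       unsigned long nr_shadow_pages)
{
	if (nr_dirty_pages < nr_shadow_pages) {
		unsigned long bit;

		/* Stand-in for for_each_set_bit(): test each bit in turn. */
		for (bit = 0; bit < SLOT_NPAGES; bit++)
			if (dirty_bitmap & (1UL << bit))
				protect_gfn(base_gfn + bit);
		printf("flush remote TLBs once\n");
	} else {
		sweep_slot();
	}
}

int main(void)
{
	/* Sparse bitmap, many shadow pages: take the per-gfn path. */
	write_protect_slot(0x200, 0x9UL, 2, 100);
	/* Dense bitmap, few shadow pages: sweep the slot instead. */
	write_protect_slot(0x200, ~0UL, SLOT_NPAGES, 10);
	return 0;
}

As in the commit, the TLB flush happens once after the per-gfn loop; the comment above write_protect_slot() explains why a write slipping through a stale TLB entry is harmless, since the page is still reported dirty from the old bitmap and gets re-protected by the flush.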
