From aadc110a8e8bdf858c5bbe5ebc22da68cf7f9f9b Mon Sep 17 00:00:00 2001
From: Takuya Yoshikawa
Date: Sun, 5 Feb 2012 20:42:41 +0900
Subject: [PATCH]

--- yaml ---
r: 297144
b: refs/heads/master
c: 6dbf79e7164e9a86c1e466062c48498142ae6128
h: refs/heads/master
v: v3
---
 [refs]                   |  2 +-
 trunk/arch/x86/kvm/x86.c | 11 +++++------
 2 files changed, 6 insertions(+), 7 deletions(-)

diff --git a/[refs] b/[refs]
index c6b987ca4d95..f294bef77565 100644
--- a/[refs]
+++ b/[refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 10166744b80a41c30d82bc6e11140f5b28d257ab
+refs/heads/master: 6dbf79e7164e9a86c1e466062c48498142ae6128
diff --git a/trunk/arch/x86/kvm/x86.c b/trunk/arch/x86/kvm/x86.c
index e86f9b22eaca..3df0b7a140b0 100644
--- a/trunk/arch/x86/kvm/x86.c
+++ b/trunk/arch/x86/kvm/x86.c
@@ -3065,6 +3065,8 @@ static void write_protect_slot(struct kvm *kvm,
 			       unsigned long *dirty_bitmap,
 			       unsigned long nr_dirty_pages)
 {
+	spin_lock(&kvm->mmu_lock);
+
 	/* Not many dirty pages compared to # of shadow pages. */
 	if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
 		unsigned long gfn_offset;
@@ -3072,16 +3074,13 @@ static void write_protect_slot(struct kvm *kvm,
 		for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
 			unsigned long gfn = memslot->base_gfn + gfn_offset;
 
-			spin_lock(&kvm->mmu_lock);
 			kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
-			spin_unlock(&kvm->mmu_lock);
 		}
 		kvm_flush_remote_tlbs(kvm);
-	} else {
-		spin_lock(&kvm->mmu_lock);
+	} else
 		kvm_mmu_slot_remove_write_access(kvm, memslot->id);
-		spin_unlock(&kvm->mmu_lock);
-	}
+
+	spin_unlock(&kvm->mmu_lock);
 }
 
 /*
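
Note: the net effect of the x86.c hunks is that mmu_lock is now taken once
around the whole function instead of once per dirty gfn inside the loop.
Below is a sketch of how write_protect_slot() should read with the patch
applied, reconstructed from the hunks above; the full signature (including
the memslot parameter) is assumed from the hunk context lines rather than
shown in full by the patch itself.

	static void write_protect_slot(struct kvm *kvm,
				       struct kvm_memory_slot *memslot,
				       unsigned long *dirty_bitmap,
				       unsigned long nr_dirty_pages)
	{
		/* One lock acquisition for the whole slot, per this patch. */
		spin_lock(&kvm->mmu_lock);

		/* Not many dirty pages compared to # of shadow pages. */
		if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
			unsigned long gfn_offset;

			/* Write protect only the gfns marked dirty in the bitmap. */
			for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
				unsigned long gfn = memslot->base_gfn + gfn_offset;

				kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
			}
			kvm_flush_remote_tlbs(kvm);
		} else
			/* Otherwise write protect the whole slot in one call. */
			kvm_mmu_slot_remove_write_access(kvm, memslot->id);

		spin_unlock(&kvm->mmu_lock);
	}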