KVM: MMU: unify slots_lock usage
Unify slots_lock acquisition around vcpu_run(). This is simpler and less
error-prone.

Also fix some callsites that were not grabbing the lock properly.

[avi: drop slots_lock while in guest mode to avoid holding the lock
      for indefinite periods]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Marcelo Tosatti authored and Avi Kivity committed Apr 27, 2008
1 parent 25c5f22 commit 3200f40
Showing 5 changed files with 26 additions and 52 deletions.
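
Only three of the five changed files are expanded below; the vcpu_run() side of the change lives in arch/x86/kvm/x86.c and is not shown here. The sketch that follows is only an illustration of the locking pattern the message describes -- the function and the two placeholder helpers are invented for this sketch, they are not the actual x86.c hunk. The idea: slots_lock is taken once for the whole run loop, dropped across the actual guest entry, and re-taken before the exit handlers, so fault handling and MMIO emulation can rely on the caller already holding the lock.

/*
 * Illustrative sketch only -- an assumed shape for the x86.c side of this
 * change, with placeholder helper names, not the real hunk.
 */
static int vcpu_run_sketch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int r = 1;

	down_read(&vcpu->kvm->slots_lock);
	while (r > 0) {		/* the real loop also checks signals, requests, ... */
		up_read(&vcpu->kvm->slots_lock);	/* avi: not held in guest mode */
		r = run_guest_placeholder(vcpu);	/* hypothetical: hardware enter/exit only */
		down_read(&vcpu->kvm->slots_lock);	/* re-taken before the exit handlers */
		if (r > 0)
			r = handle_exit_placeholder(vcpu, kvm_run);	/* faults, MMIO, ... */
	}
	up_read(&vcpu->kvm->slots_lock);
	return r;
}

This is also why vmx_vcpu_reset() below starts taking the lock itself: it is reached outside the locked vcpu_run() loop, and init_rmode_tss() no longer acquires slots_lock on its own.
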
13 changes: 2 additions & 11 deletions arch/x86/kvm/mmu.c
@@ -1204,8 +1204,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)

 	struct page *page;

-	down_read(&vcpu->kvm->slots_lock);
-
 	down_read(&current->mm->mmap_sem);
 	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
 		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
@@ -1218,7 +1216,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	/* mmio */
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
-		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}

@@ -1228,7 +1225,6 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 			 PT32E_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);

-	up_read(&vcpu->kvm->slots_lock);

 	return r;
 }
@@ -1376,17 +1372,16 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 		largepage = 1;
 	}
 	page = gfn_to_page(vcpu->kvm, gfn);
+	up_read(&current->mm->mmap_sem);
 	if (is_error_page(page)) {
 		kvm_release_page_clean(page);
-		up_read(&current->mm->mmap_sem);
 		return 1;
 	}
 	spin_lock(&vcpu->kvm->mmu_lock);
 	kvm_mmu_free_some_pages(vcpu);
 	r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
 			 largepage, gfn, page, TDP_ROOT_LEVEL);
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&current->mm->mmap_sem);

 	return r;
 }
@@ -1808,9 +1803,7 @@ int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
 	gpa_t gpa;
 	int r;

-	down_read(&vcpu->kvm->slots_lock);
 	gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
-	up_read(&vcpu->kvm->slots_lock);

 	spin_lock(&vcpu->kvm->mmu_lock);
 	r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
@@ -2063,7 +2056,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
 	if (r)
 		return r;

-	if (!__emulator_write_phys(vcpu, addr, &value, bytes))
+	if (!emulator_write_phys(vcpu, addr, &value, bytes))
 		return -EFAULT;

 	return 1;
@@ -2127,7 +2120,6 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 	int r;
 	struct kvm_pv_mmu_op_buffer buffer;

-	down_read(&vcpu->kvm->slots_lock);
 	down_read(&current->mm->mmap_sem);

 	buffer.ptr = buffer.buf;
@@ -2150,7 +2142,6 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 out:
 	*ret = buffer.processed;
 	up_read(&current->mm->mmap_sem);
-	up_read(&vcpu->kvm->slots_lock);
 	return r;
 }

4 changes: 0 additions & 4 deletions arch/x86/kvm/paging_tmpl.h
@@ -388,7 +386,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (r)
 		return r;

-	down_read(&vcpu->kvm->slots_lock);
 	/*
 	 * Look up the shadow pte for the faulting address.
 	 */
@@ -402,7 +401,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 		pgprintk("%s: guest page fault\n", __func__);
 		inject_page_fault(vcpu, addr, walker.error_code);
 		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
-		up_read(&vcpu->kvm->slots_lock);
 		return 0;
 	}

@@ -422,7 +420,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	if (is_error_page(page)) {
 		pgprintk("gfn %x is mmio\n", walker.gfn);
 		kvm_release_page_clean(page);
-		up_read(&vcpu->kvm->slots_lock);
 		return 1;
 	}

@@ -440,7 +437,6 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	++vcpu->stat.pf_fixed;
 	kvm_mmu_audit(vcpu, "post page fault (fixed)");
 	spin_unlock(&vcpu->kvm->mmu_lock);
-	up_read(&vcpu->kvm->slots_lock);

 	return write_pt;
 }
6 changes: 3 additions & 3 deletions arch/x86/kvm/vmx.c
@@ -1505,7 +1505,6 @@ static int init_rmode_tss(struct kvm *kvm)
 	int ret = 0;
 	int r;

-	down_read(&kvm->slots_lock);
 	r = kvm_clear_guest_page(kvm, fn, 0, PAGE_SIZE);
 	if (r < 0)
 		goto out;
@@ -1528,7 +1527,6 @@ static int init_rmode_tss(struct kvm *kvm)

 	ret = 1;
 out:
-	up_read(&kvm->slots_lock);
 	return ret;
 }

@@ -1730,6 +1728,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	u64 msr;
 	int ret;

+	down_read(&vcpu->kvm->slots_lock);
 	if (!init_rmode_tss(vmx->vcpu.kvm)) {
 		ret = -ENOMEM;
 		goto out;
@@ -1833,9 +1832,10 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)

 	vpid_sync_vcpu_all(vmx);

-	return 0;
+	ret = 0;

 out:
+	up_read(&vcpu->kvm->slots_lock);
 	return ret;
 }
