kvm/ppc/booke: Hold srcu lock when calling gfn functions
KVM core expects arch code to acquire the srcu lock when calling
gfn_to_memslot and similar functions.

Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Scott Wood authored and Gleb Natapov committed Jun 11, 2013
1 parent 2b6398f commit f1e8902
Showing 3 changed files with 17 additions and 0 deletions.
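
For context on the rule the commit message cites: KVM's memslots are protected by kvm->srcu, so arch code must be inside an srcu_read_lock()/srcu_read_unlock() read-side critical section whenever it calls gfn_to_memslot() or similar accessors. A minimal sketch of that pattern follows; it is illustrative only and not part of this commit, the helper name is made up, and the APIs are the same ones used in the hunks below.

#include <linux/kvm_host.h>

/*
 * Illustrative only, not part of this commit: any arch-side path that
 * looks up a memslot must hold kvm->srcu for reading while it does so.
 */
static bool example_gfn_has_memslot(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	struct kvm_memory_slot *slot;
	bool has_slot;
	int idx;

	idx = srcu_read_lock(&vcpu->kvm->srcu);	/* enter SRCU read side */
	slot = gfn_to_memslot(vcpu->kvm, gfn);	/* memslot lookup is now safe */
	has_slot = slot != NULL;
	srcu_read_unlock(&vcpu->kvm->srcu, idx);	/* leave SRCU read side */

	return has_slot;
}
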
arch/powerpc/kvm/44x_tlb.c: 5 additions, 0 deletions

@@ -441,6 +441,7 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
 	struct kvmppc_44x_tlbe *tlbe;
 	unsigned int gtlb_index;
+	int idx;
 
 	gtlb_index = kvmppc_get_gpr(vcpu, ra);
 	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
@@ -473,6 +474,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		return EMULATE_FAIL;
 	}
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	if (tlbe_is_host_safe(vcpu, tlbe)) {
 		gva_t eaddr;
 		gpa_t gpaddr;
@@ -489,6 +492,8 @@ int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
 		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
 	}
 
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
 			     tlbe->word2);
 

arch/powerpc/kvm/booke.c: 7 additions, 0 deletions

@@ -832,6 +832,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 {
 	int r = RESUME_HOST;
 	int s;
+	int idx;
 
 	/* update before a new last_exit_type is rewritten */
 	kvmppc_update_timing_stats(vcpu);
@@ -1053,6 +1054,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			break;
 		}
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
 
@@ -1075,6 +1078,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_account_exit(vcpu, MMIO_EXITS);
 		}
 
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 
@@ -1098,6 +1102,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 
 		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);
 
+		idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
 		gfn = gpaddr >> PAGE_SHIFT;
 
@@ -1114,6 +1120,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
 		}
 
+		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		break;
 	}
 

arch/powerpc/kvm/e500_mmu.c: 5 additions, 0 deletions

@@ -396,6 +396,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 	struct kvm_book3e_206_tlb_entry *gtlbe;
 	int tlbsel, esel;
 	int recal = 0;
+	int idx;
 
 	tlbsel = get_tlb_tlbsel(vcpu);
 	esel = get_tlb_esel(vcpu, tlbsel);
@@ -430,6 +431,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 		kvmppc_set_tlb1map_range(vcpu, gtlbe);
 	}
 
+	idx = srcu_read_lock(&vcpu->kvm->srcu);
+
 	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 	if (tlbe_is_host_safe(vcpu, gtlbe)) {
 		u64 eaddr = get_tlb_eaddr(gtlbe);
@@ -444,6 +447,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 		kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
 	}
 
+	srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
 	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
 	return EMULATE_DONE;
 }
