KVM: PPC: Book3S HV: add virtual mode handlers for HPT hcalls and page faults

In order to support hash guests in the P9 path (which does not do real-mode
hcalls or page fault handling), these real-mode hash-specific
interrupts need to be implemented in virtual mode.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20210528090752.3542186-29-npiggin@gmail.com
Nicholas Piggin authored and Michael Ellerman committed Jun 10, 2021
1 parent a9aa86e commit 6165d5d
Showing 2 changed files with 144 additions and 9 deletions.
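A note on the recurring pattern in the first file: each HPT hcall is now attempted in virtual mode, and only a H_TOO_HARD return punts the call up to the host. A minimal sketch of that control flow, with a hypothetical try_hpt_hcall() standing in for the individual kvmppc_h_*() handlers (illustration only, not kernel code):

    /* Sketch: the virtual-mode hcall fallback pattern used below. */
    long ret = try_hpt_hcall(vcpu, req);    /* hypothetical dispatcher */
    if (ret == H_TOO_HARD)
        return RESUME_HOST;                 /* hand the hcall up to the host */
    kvmppc_set_gpr(vcpu, 3, ret);           /* status back to the guest in GPR3 */
    vcpu->arch.hcall_needed = 0;
    return RESUME_GUEST;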
145 changes: 136 additions & 9 deletions arch/powerpc/kvm/book3s_hv.c
@@ -939,6 +939,52 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
return RESUME_HOST;

switch (req) {
case H_REMOVE:
ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_ENTER:
ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6),
kvmppc_get_gpr(vcpu, 7));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_READ:
ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_CLEAR_MOD:
ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_CLEAR_REF:
ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_PROTECT:
ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
kvmppc_get_gpr(vcpu, 5),
kvmppc_get_gpr(vcpu, 6));
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;
case H_BULK_REMOVE:
ret = kvmppc_h_bulk_remove(vcpu);
if (ret == H_TOO_HARD)
return RESUME_HOST;
break;

case H_CEDE:
break;
case H_PROD:
@@ -1138,6 +1184,7 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
default:
return RESUME_HOST;
}
WARN_ON_ONCE(ret == H_TOO_HARD);
kvmppc_set_gpr(vcpu, 3, ret);
vcpu->arch.hcall_needed = 0;
return RESUME_GUEST;
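For readers decoding the kvmppc_get_gpr() arguments above: these hcalls follow the PAPR calling convention, with the hcall number in GPR3 and arguments in GPR4 onward. Annotating H_ENTER as one example (parameter names per PAPR; illustration only, not part of the commit):

    /*
     * H_ENTER as decoded above (sketch):
     *   GPR4 = flags
     *   GPR5 = pte_index  (HPT slot selector)
     *   GPR6 = pteh       (high doubleword of the HPTE)
     *   GPR7 = ptel       (low doubleword of the HPTE)
     * On success, GPR3 carries the status and GPR4 the slot actually
     * used, written through &vcpu->arch.regs.gpr[4] in the handler.
     */
    ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),   /* flags */
                         kvmppc_get_gpr(vcpu, 5),         /* pte_index */
                         kvmppc_get_gpr(vcpu, 6),         /* pteh */
                         kvmppc_get_gpr(vcpu, 7));        /* ptel */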
@@ -1438,22 +1485,102 @@ static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu,
* We get these next two if the guest accesses a page which it thinks
* it has mapped but which is not actually present, either because
* it is for an emulated I/O device or because the corresponding
- * host page has been paged out. Any other HDSI/HISI interrupts
- * have been handled already.
* host page has been paged out.
*
* Any other HDSI/HISI interrupts have been handled already for P7/8
* guests. For POWER9 hash guests not using rmhandlers, basic hash
* fault handling is done here.
*/
- case BOOK3S_INTERRUPT_H_DATA_STORAGE:
- 	r = RESUME_PAGE_FAULT;
- 	if (vcpu->arch.fault_dsisr == HDSISR_CANARY)
case BOOK3S_INTERRUPT_H_DATA_STORAGE: {
unsigned long vsid;
long err;

if (vcpu->arch.fault_dsisr == HDSISR_CANARY) {
r = RESUME_GUEST; /* Just retry if it's the canary */
break;
}

if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
* Radix doesn't require anything, and pre-ISAv3.0 hash
* already attempted to handle this in rmhandlers. The
* hash fault handling below is v3 only (it uses ASDR
* via fault_gpa).
*/
r = RESUME_PAGE_FAULT;
break;
}

if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) {
kvmppc_core_queue_data_storage(vcpu,
vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
r = RESUME_GUEST;
break;
}

if (!(vcpu->arch.shregs.msr & MSR_DR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;

err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
vsid, vcpu->arch.fault_dsisr, true);
if (err == 0) {
r = RESUME_GUEST;
} else if (err == -1 || err == -2) {
r = RESUME_PAGE_FAULT;
} else {
kvmppc_core_queue_data_storage(vcpu,
vcpu->arch.fault_dar, err);
r = RESUME_GUEST;
}
break;
- case BOOK3S_INTERRUPT_H_INST_STORAGE:
}
case BOOK3S_INTERRUPT_H_INST_STORAGE: {
unsigned long vsid;
long err;

vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr &
DSISR_SRR1_MATCH_64S;
- if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
- 	vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
- r = RESUME_PAGE_FAULT;
if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) {
/*
* Radix doesn't require anything, and pre-ISAv3.0 hash
* already attempted to handle this in rmhandlers. The
* hash fault handling below is v3 only (it uses ASDR
* via fault_gpa).
*/
if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE)
vcpu->arch.fault_dsisr |= DSISR_ISSTORE;
r = RESUME_PAGE_FAULT;
break;
}

if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) {
kvmppc_core_queue_inst_storage(vcpu,
vcpu->arch.fault_dsisr);
r = RESUME_GUEST;
break;
}

if (!(vcpu->arch.shregs.msr & MSR_IR))
vsid = vcpu->kvm->arch.vrma_slb_v;
else
vsid = vcpu->arch.fault_gpa;

err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar,
vsid, vcpu->arch.fault_dsisr, false);
if (err == 0) {
r = RESUME_GUEST;
} else if (err == -1) {
r = RESUME_PAGE_FAULT;
} else {
kvmppc_core_queue_inst_storage(vcpu, err);
r = RESUME_GUEST;
}
break;
}

/*
* This occurs if the guest executes an illegal instruction.
* If the guest debug is disabled, generate a program interrupt
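Both new fault cases share one shape: choose the segment value (vsid), ask kvmppc_hpte_hv_fault() to classify the fault against the HPT, and translate its return code into a resume action. A condensed restatement of the data-storage side (mirrors the diff above; simplified, not a drop-in):

    unsigned long vsid;
    long err;

    if (!(vcpu->arch.shregs.msr & MSR_DR))        /* guest translation off */
        vsid = vcpu->kvm->arch.vrma_slb_v;        /* fault is in the VRMA */
    else
        vsid = vcpu->arch.fault_gpa;              /* ASDR, captured on ISAv3.0 */

    err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, vsid,
                               vcpu->arch.fault_dsisr, true /* data side */);
    if (err == 0)
        r = RESUME_GUEST;             /* HPTE state fixed up; retry the access */
    else if (err == -1 || err == -2)
        r = RESUME_PAGE_FAULT;        /* resolve through the host page-fault path */
    else {
        /* err carries DSISR bits: reflect a DSI back to the guest */
        kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dar, err);
        r = RESUME_GUEST;
    }

The instruction side mirrors this with MSR_IR, SRR1_ISI_NOPT, and kvmppc_core_queue_inst_storage(), and, as the diff shows, maps only -1 to RESUME_PAGE_FAULT there.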
8 changes: 8 additions & 0 deletions arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -409,6 +409,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu->arch.pgdir, true,
&vcpu->arch.regs.gpr[4]);
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -553,6 +554,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
&vcpu->arch.regs.gpr[4]);
}
EXPORT_SYMBOL_GPL(kvmppc_h_remove);

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
@@ -671,6 +673,7 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)

return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index, unsigned long avpn)
@@ -741,6 +744,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,

return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_protect);

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -781,6 +785,7 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
}
return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_read);

long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -829,6 +834,7 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);

long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -876,6 +882,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);

static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
unsigned long gpa, int writing, unsigned long *hpa,
@@ -1294,3 +1301,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,

return -1; /* send fault up to host kernel mode */
}
EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
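The only changes to book3s_hv_rm_mmu.c are these EXPORT_SYMBOL_GPL() lines. The real-mode HPT handlers are built into the kernel image (real-mode code cannot run from module space), while the virtual-mode exit path that now calls them can be built as the kvm-hv module, so each handler used above must be exported. Schematically, using one of the handlers above (sketch of the mechanism, not new code in the commit):

    /* In the built-in object (book3s_hv_rm_mmu.o): export the handler... */
    EXPORT_SYMBOL_GPL(kvmppc_h_read);

    /* ...so module code (book3s_hv.o in kvm-hv.ko) can call it directly: */
    ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
                        kvmppc_get_gpr(vcpu, 5));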
