RISC-V: KVM: Use NACL HFENCEs for KVM request based HFENCEs
When running under another hypervisor, use SBI NACL-based HFENCEs
for TLB shoot-down via KVM requests. This makes HFENCEs faster whenever
SBI nested acceleration is available.

Signed-off-by: Anup Patel <apatel@ventanamicro.com>
Reviewed-by: Atish Patra <atishp@rivosinc.com>
Link: https://lore.kernel.org/r/20241020194734.58686-14-apatel@ventanamicro.com
Signed-off-by: Anup Patel <anup@brainfault.org>
Anup Patel authored and Anup Patel committed Oct 28, 2024
1 parent 3e7d154 commit 5bdecd8
Showing 1 changed file with 40 additions and 17 deletions.
arch/riscv/kvm/tlb.c (+40, -17)

@@ -14,6 +14,7 @@
 #include <asm/csr.h>
 #include <asm/cpufeature.h>
 #include <asm/insn-def.h>
+#include <asm/kvm_nacl.h>
 
 #define has_svinval() riscv_has_extension_unlikely(RISCV_ISA_EXT_SVINVAL)
 
@@ -186,18 +187,24 @@ void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu)
 
 void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vmid *vmid;
+	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+	unsigned long vmid = READ_ONCE(v->vmid);
 
-	vmid = &vcpu->kvm->arch.vmid;
-	kvm_riscv_local_hfence_gvma_vmid_all(READ_ONCE(vmid->vmid));
+	if (kvm_riscv_nacl_available())
+		nacl_hfence_gvma_vmid_all(nacl_shmem(), vmid);
+	else
+		kvm_riscv_local_hfence_gvma_vmid_all(vmid);
 }
 
 void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu)
 {
-	struct kvm_vmid *vmid;
+	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
+	unsigned long vmid = READ_ONCE(v->vmid);
 
-	vmid = &vcpu->kvm->arch.vmid;
-	kvm_riscv_local_hfence_vvma_all(READ_ONCE(vmid->vmid));
+	if (kvm_riscv_nacl_available())
+		nacl_hfence_vvma_all(nacl_shmem(), vmid);
+	else
+		kvm_riscv_local_hfence_vvma_all(vmid);
 }
 
 static bool vcpu_hfence_dequeue(struct kvm_vcpu *vcpu,
@@ -251,6 +258,7 @@ static bool vcpu_hfence_enqueue(struct kvm_vcpu *vcpu,
 
 void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
 {
+	unsigned long vmid;
 	struct kvm_riscv_hfence d = { 0 };
 	struct kvm_vmid *v = &vcpu->kvm->arch.vmid;
 
@@ -259,26 +267,41 @@ void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu)
 		case KVM_RISCV_HFENCE_UNKNOWN:
 			break;
 		case KVM_RISCV_HFENCE_GVMA_VMID_GPA:
-			kvm_riscv_local_hfence_gvma_vmid_gpa(
-						READ_ONCE(v->vmid),
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_gvma_vmid(nacl_shmem(), vmid,
+						      d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_gvma_vmid_gpa(vmid, d.addr,
+								     d.size, d.order);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_ASID_GVA:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-			kvm_riscv_local_hfence_vvma_asid_gva(
-						READ_ONCE(v->vmid), d.asid,
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma_asid(nacl_shmem(), vmid, d.asid,
+						      d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_vvma_asid_gva(vmid, d.asid, d.addr,
+								     d.size, d.order);
 			break;
 		case KVM_RISCV_HFENCE_VVMA_ASID_ALL:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD);
-			kvm_riscv_local_hfence_vvma_asid_all(
-						READ_ONCE(v->vmid), d.asid);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma_asid_all(nacl_shmem(), vmid, d.asid);
+			else
+				kvm_riscv_local_hfence_vvma_asid_all(vmid, d.asid);
			break;
 		case KVM_RISCV_HFENCE_VVMA_GVA:
 			kvm_riscv_vcpu_pmu_incr_fw(vcpu, SBI_PMU_FW_HFENCE_VVMA_RCVD);
-			kvm_riscv_local_hfence_vvma_gva(
-						READ_ONCE(v->vmid),
-						d.addr, d.size, d.order);
+			vmid = READ_ONCE(v->vmid);
+			if (kvm_riscv_nacl_available())
+				nacl_hfence_vvma(nacl_shmem(), vmid,
+						 d.addr, d.size, d.order);
+			else
+				kvm_riscv_local_hfence_vvma_gva(vmid, d.addr,
+								d.size, d.order);
 			break;
 		default:
 			break;

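Editor's note: every function touched by this diff follows the same dispatch pattern: read the guest's VMID once, then route the HFENCE either through the SBI NACL shared memory (when nested acceleration was detected) or through a local HFENCE, as before. The following is a minimal, standalone C sketch of that pattern. Only the control flow mirrors the diff; the stub bodies and all *_stub names are illustrative, not kernel API (the real helpers are kvm_riscv_nacl_available(), nacl_shmem(), and the nacl_hfence_*() / kvm_riscv_local_hfence_*() families shown above).

/*
 * Minimal sketch of the HFENCE dispatch pattern introduced by this
 * patch. Stubs are illustrative; only the control flow mirrors the diff.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for kvm_riscv_nacl_available(): NACL support probed once. */
static bool nacl_available = true;

/* Stand-in for nacl_shmem(): memory shared with the host hypervisor. */
static void *nacl_shmem_stub(void)
{
	static char shmem[4096];
	return shmem;
}

/* Stand-in for nacl_hfence_gvma_vmid_all(): record the fence in shmem. */
static void nacl_hfence_gvma_vmid_all_stub(void *shmem, unsigned long vmid)
{
	printf("NACL: queued HFENCE.GVMA for VMID %lu at %p\n", vmid, shmem);
}

/* Stand-in for kvm_riscv_local_hfence_gvma_vmid_all(): fence this hart. */
static void local_hfence_gvma_vmid_all_stub(unsigned long vmid)
{
	printf("local: HFENCE.GVMA for VMID %lu\n", vmid);
}

/* Mirrors kvm_riscv_hfence_gvma_vmid_all_process() after this patch. */
static void hfence_gvma_vmid_all_process(unsigned long vmid)
{
	if (nacl_available)
		nacl_hfence_gvma_vmid_all_stub(nacl_shmem_stub(), vmid);
	else
		local_hfence_gvma_vmid_all_stub(vmid);
}

int main(void)
{
	hfence_gvma_vmid_all_process(42);	/* NACL path */
	nacl_available = false;
	hfence_gvma_vmid_all_process(42);	/* local fallback */
	return 0;
}

The speedup comes from the NACL path: rather than executing an HFENCE instruction that traps to the host hypervisor on every request, the fence is recorded in shared memory and synchronized in a batch, which is what makes HFENCEs faster when SBI nested acceleration is available.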