Merge tag 'kvm-riscv-6.16-1' of https://github.com/kvm-riscv/linux into HEAD

KVM/riscv changes for 6.16

- Add vector registers to get-reg-list selftest
- VCPU reset related improvements
- Remove scounteren initialization from VCPU reset
- Support VCPU reset from userspace using set_mpstate() ioctl
Paolo Bonzini committed May 26, 2025
2 parents 4d526b0 + 7917be1 commit 1f7c9d5
Showing 22 changed files with 374 additions and 152 deletions.
11 changes: 11 additions & 0 deletions Documentation/virt/kvm/api.rst
@@ -8541,6 +8541,17 @@ aforementioned registers before the first KVM_RUN. These registers are VM
 scoped, meaning that the same set of values are presented on all vCPUs in a
 given VM.
 
+7.43 KVM_CAP_RISCV_MP_STATE_RESET
+---------------------------------
+
+:Architectures: riscv
+:Type: VM
+:Parameters: None
+:Returns: 0 on success, -EINVAL if arg[0] is not zero
+
+When this capability is enabled, KVM resets the VCPU when setting
+MP_STATE_INIT_RECEIVED through IOCTL. The original MP_STATE is preserved.
+
 8. Other capabilities.
 ======================
 
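A minimal userspace sketch of the flow documented above (not part of this commit; it assumes a <linux/kvm.h> recent enough to define KVM_CAP_RISCV_MP_STATE_RESET, and the helper name is hypothetical):

/*
 * Hypothetical helper: enable KVM_CAP_RISCV_MP_STATE_RESET on the VM fd,
 * then reset a vCPU by setting KVM_MP_STATE_INIT_RECEIVED on its vCPU fd.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int riscv_userspace_vcpu_reset(int vm_fd, int vcpu_fd)
{
	struct kvm_enable_cap cap;
	struct kvm_mp_state mp_state;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_RISCV_MP_STATE_RESET;	/* args must stay zero */
	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
		return -1;

	/*
	 * With the capability enabled this resets the vCPU and preserves
	 * the original MP_STATE; without it, KVM returns -EINVAL.
	 */
	memset(&mp_state, 0, sizeof(mp_state));
	mp_state.mp_state = KVM_MP_STATE_INIT_RECEIVED;
	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
}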
3 changes: 0 additions & 3 deletions arch/riscv/include/asm/kvm_aia.h
@@ -63,9 +63,6 @@ struct kvm_vcpu_aia {
 	/* CPU AIA CSR context of Guest VCPU */
 	struct kvm_vcpu_aia_csr guest_csr;
 
-	/* CPU AIA CSR context upon Guest VCPU reset */
-	struct kvm_vcpu_aia_csr guest_reset_csr;
-
 	/* Guest physical address of IMSIC for this VCPU */
 	gpa_t imsic_addr;
 
17 changes: 11 additions & 6 deletions arch/riscv/include/asm/kvm_host.h
@@ -119,6 +119,9 @@ struct kvm_arch {
 
 	/* AIA Guest/VM context */
 	struct kvm_aia aia;
+
+	/* KVM_CAP_RISCV_MP_STATE_RESET */
+	bool mp_state_reset;
 };
 
 struct kvm_cpu_trap {
@@ -193,6 +196,12 @@ struct kvm_vcpu_smstateen_csr {
 	unsigned long sstateen0;
 };
 
+struct kvm_vcpu_reset_state {
+	spinlock_t lock;
+	unsigned long pc;
+	unsigned long a1;
+};
+
 struct kvm_vcpu_arch {
 	/* VCPU ran at least once */
 	bool ran_atleast_once;
@@ -227,12 +236,8 @@ struct kvm_vcpu_arch {
 	/* CPU Smstateen CSR context of Guest VCPU */
 	struct kvm_vcpu_smstateen_csr smstateen_csr;
 
-	/* CPU context upon Guest VCPU reset */
-	struct kvm_cpu_context guest_reset_context;
-	spinlock_t reset_cntx_lock;
-
-	/* CPU CSR context upon Guest VCPU reset */
-	struct kvm_vcpu_csr guest_reset_csr;
+	/* CPU reset state of Guest VCPU */
+	struct kvm_vcpu_reset_state reset_state;
 
 	/*
 	 * VCPU interrupts
3 changes: 3 additions & 0 deletions arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -55,6 +55,9 @@ void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
 void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 				     struct kvm_run *run,
 				     u32 type, u64 flags);
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+				      unsigned long pc, unsigned long a1);
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
 				   const struct kvm_one_reg *reg);
6 changes: 2 additions & 4 deletions arch/riscv/include/asm/kvm_vcpu_vector.h
@@ -33,8 +33,7 @@ void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
 					 unsigned long *isa);
 void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
 void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-					struct kvm_cpu_context *cntx);
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
 #else
 
@@ -62,8 +61,7 @@ static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
 {
 }
 
-static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-						      struct kvm_cpu_context *cntx)
+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
 {
 	return 0;
 }
10 changes: 10 additions & 0 deletions arch/riscv/kernel/head.S
@@ -131,6 +131,12 @@ secondary_start_sbi:
 	csrw CSR_IE, zero
 	csrw CSR_IP, zero
 
+#ifndef CONFIG_RISCV_M_MODE
+	/* Enable time CSR */
+	li t0, 0x2
+	csrw CSR_SCOUNTEREN, t0
+#endif
+
 	/* Load the global pointer */
 	load_global_pointer
 
@@ -226,6 +232,10 @@ SYM_CODE_START(_start_kernel)
 	 * to hand it to us.
 	 */
 	csrr a0, CSR_MHARTID
+#else
+	/* Enable time CSR */
+	li t0, 0x2
+	csrw CSR_SCOUNTEREN, t0
 #endif /* CONFIG_RISCV_M_MODE */
 
 	/* Load the global pointer */
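As a reading aid for the two hunks above: the value 0x2 written to CSR_SCOUNTEREN sets only the TM bit, so user mode may execute rdtime while rdcycle and rdinstret stay trapped. This matters once the series stops KVM from pre-initializing scounteren at VCPU reset (per the commit message), so the kernel enables it itself at boot. The bit positions are from the RISC-V privileged spec; the macro names below are illustrative, not taken from this diff:

/* Illustrative scounteren bit positions (RISC-V privileged spec). */
#define SCOUNTEREN_CY	(1UL << 0)	/* U-mode rdcycle   */
#define SCOUNTEREN_TM	(1UL << 1)	/* U-mode rdtime    */
#define SCOUNTEREN_IR	(1UL << 2)	/* U-mode rdinstret */

/* head.S writes 0x2, i.e. SCOUNTEREN_TM only. */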
2 changes: 1 addition & 1 deletion arch/riscv/kvm/Kconfig
@@ -18,7 +18,7 @@ menuconfig VIRTUALIZATION
 if VIRTUALIZATION
 
 config KVM
-	tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)"
+	tristate "Kernel-based Virtual Machine (KVM) support"
 	depends on RISCV_SBI && MMU
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQ_ROUTING
4 changes: 1 addition & 3 deletions arch/riscv/kvm/aia_device.c
@@ -526,12 +526,10 @@ int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu)
 void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
-	struct kvm_vcpu_aia_csr *reset_csr =
-				&vcpu->arch.aia_context.guest_reset_csr;
 
 	if (!kvm_riscv_aia_available())
 		return;
-	memcpy(csr, reset_csr, sizeof(*csr));
+	memset(csr, 0, sizeof(*csr));
 
 	/* Proceed only if AIA was initialized successfully */
 	if (!kvm_riscv_aia_initialized(vcpu->kvm))
64 changes: 35 additions & 29 deletions arch/riscv/kvm/vcpu.c
@@ -51,12 +51,33 @@ const struct kvm_stats_header kvm_vcpu_stats_header = {
 		       sizeof(kvm_vcpu_stats_desc),
 };
 
-static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_context_reset(struct kvm_vcpu *vcpu,
+					 bool kvm_sbi_reset)
 {
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context;
+	void *vector_datap = cntx->vector.datap;
+
+	memset(cntx, 0, sizeof(*cntx));
+	memset(csr, 0, sizeof(*csr));
+	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+
+	/* Restore datap as it's not a part of the guest context. */
+	cntx->vector.datap = vector_datap;
+
+	if (kvm_sbi_reset)
+		kvm_riscv_vcpu_sbi_load_reset_state(vcpu);
+
+	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
+	cntx->sstatus = SR_SPP | SR_SPIE;
+
+	cntx->hstatus |= HSTATUS_VTW;
+	cntx->hstatus |= HSTATUS_SPVP;
+	cntx->hstatus |= HSTATUS_SPV;
+}
+
+static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu, bool kvm_sbi_reset)
+{
 	bool loaded;
 
 	/**
@@ -71,13 +92,7 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 
 	vcpu->arch.last_exit_cpu = -1;
 
-	memcpy(csr, reset_csr, sizeof(*csr));
-
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	memcpy(cntx, reset_cntx, sizeof(*cntx));
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-	memset(&vcpu->arch.smstateen_csr, 0, sizeof(vcpu->arch.smstateen_csr));
+	kvm_riscv_vcpu_context_reset(vcpu, kvm_sbi_reset);
 
 	kvm_riscv_vcpu_fp_reset(vcpu);
 
@@ -112,8 +127,6 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 {
 	int rc;
-	struct kvm_cpu_context *cntx;
-	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 
 	spin_lock_init(&vcpu->arch.mp_state_lock);
 
@@ -133,24 +146,11 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	/* Setup VCPU hfence queue */
 	spin_lock_init(&vcpu->arch.hfence_lock);
 
-	/* Setup reset state of shadow SSTATUS and HSTATUS CSRs */
-	spin_lock_init(&vcpu->arch.reset_cntx_lock);
+	spin_lock_init(&vcpu->arch.reset_state.lock);
 
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	cntx = &vcpu->arch.guest_reset_context;
-	cntx->sstatus = SR_SPP | SR_SPIE;
-	cntx->hstatus = 0;
-	cntx->hstatus |= HSTATUS_VTW;
-	cntx->hstatus |= HSTATUS_SPVP;
-	cntx->hstatus |= HSTATUS_SPV;
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-	if (kvm_riscv_vcpu_alloc_vector_context(vcpu, cntx))
+	if (kvm_riscv_vcpu_alloc_vector_context(vcpu))
 		return -ENOMEM;
 
-	/* By default, make CY, TM, and IR counters accessible in VU mode */
-	reset_csr->scounteren = 0x7;
-
 	/* Setup VCPU timer */
 	kvm_riscv_vcpu_timer_init(vcpu);
 
@@ -169,7 +169,7 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	kvm_riscv_vcpu_sbi_init(vcpu);
 
 	/* Reset VCPU */
-	kvm_riscv_reset_vcpu(vcpu);
+	kvm_riscv_reset_vcpu(vcpu, false);
 
 	return 0;
 }
@@ -518,6 +518,12 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 	case KVM_MP_STATE_STOPPED:
 		__kvm_riscv_vcpu_power_off(vcpu);
 		break;
+	case KVM_MP_STATE_INIT_RECEIVED:
+		if (vcpu->kvm->arch.mp_state_reset)
+			kvm_riscv_reset_vcpu(vcpu, false);
+		else
+			ret = -EINVAL;
+		break;
 	default:
 		ret = -EINVAL;
 	}
@@ -706,7 +712,7 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 	}
 
 	if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
-		kvm_riscv_reset_vcpu(vcpu);
+		kvm_riscv_reset_vcpu(vcpu, true);
 
 	if (kvm_check_request(KVM_REQ_UPDATE_HGATP, vcpu))
 		kvm_riscv_gstage_update_hgatp(vcpu);
32 changes: 30 additions & 2 deletions arch/riscv/kvm/vcpu_sbi.c
@@ -143,9 +143,9 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 	struct kvm_vcpu *tmp;
 
 	kvm_for_each_vcpu(i, tmp, vcpu->kvm) {
-		spin_lock(&vcpu->arch.mp_state_lock);
+		spin_lock(&tmp->arch.mp_state_lock);
 		WRITE_ONCE(tmp->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
-		spin_unlock(&vcpu->arch.mp_state_lock);
+		spin_unlock(&tmp->arch.mp_state_lock);
 	}
 	kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
 
@@ -156,6 +156,34 @@ void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
 	run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
 }
 
+void kvm_riscv_vcpu_sbi_request_reset(struct kvm_vcpu *vcpu,
+				      unsigned long pc, unsigned long a1)
+{
+	spin_lock(&vcpu->arch.reset_state.lock);
+	vcpu->arch.reset_state.pc = pc;
+	vcpu->arch.reset_state.a1 = a1;
+	spin_unlock(&vcpu->arch.reset_state.lock);
+
+	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+}
+
+void kvm_riscv_vcpu_sbi_load_reset_state(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	struct kvm_vcpu_reset_state *reset_state = &vcpu->arch.reset_state;
+
+	cntx->a0 = vcpu->vcpu_id;
+
+	spin_lock(&vcpu->arch.reset_state.lock);
+	cntx->sepc = reset_state->pc;
+	cntx->a1 = reset_state->a1;
+	spin_unlock(&vcpu->arch.reset_state.lock);
+
+	cntx->sstatus &= ~SR_SIE;
+	csr->vsatp = 0;
+}
+
 int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
13 changes: 1 addition & 12 deletions arch/riscv/kvm/vcpu_sbi_hsm.c
@@ -15,7 +15,6 @@
 
 static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
 {
-	struct kvm_cpu_context *reset_cntx;
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
 	struct kvm_vcpu *target_vcpu;
 	unsigned long target_vcpuid = cp->a0;
@@ -32,17 +31,7 @@ static int kvm_sbi_hsm_vcpu_start(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	spin_lock(&target_vcpu->arch.reset_cntx_lock);
-	reset_cntx = &target_vcpu->arch.guest_reset_context;
-	/* start address */
-	reset_cntx->sepc = cp->a1;
-	/* target vcpu id to start */
-	reset_cntx->a0 = target_vcpuid;
-	/* private data passed from kernel */
-	reset_cntx->a1 = cp->a2;
-	spin_unlock(&target_vcpu->arch.reset_cntx_lock);
-
-	kvm_make_request(KVM_REQ_VCPU_RESET, target_vcpu);
+	kvm_riscv_vcpu_sbi_request_reset(target_vcpu, cp->a1, cp->a2);
 
 	__kvm_riscv_vcpu_power_on(target_vcpu);
 
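For context on the new helper's arguments: kvm_sbi_hsm_vcpu_start() runs when a guest hart calls SBI HSM hart_start(hartid, start_addr, opaque), so cp->a1 is the entry point and cp->a2 the opaque private data. A guest-side sketch, using the extension/function IDs from the SBI spec rather than anything in this diff:

/* Guest-side sketch of the call that ends up in
 * kvm_riscv_vcpu_sbi_request_reset(target_vcpu, cp->a1, cp->a2). */
#define SBI_EXT_HSM		0x48534d
#define SBI_EXT_HSM_HART_START	0

static long sbi_hart_start(unsigned long hartid,
			   unsigned long start_addr, unsigned long opaque)
{
	register unsigned long a0 asm("a0") = hartid;	  /* -> cp->a0 */
	register unsigned long a1 asm("a1") = start_addr; /* -> cp->a1 */
	register unsigned long a2 asm("a2") = opaque;	  /* -> cp->a2 */
	register unsigned long a6 asm("a6") = SBI_EXT_HSM_HART_START;
	register unsigned long a7 asm("a7") = SBI_EXT_HSM;

	asm volatile("ecall"
		     : "+r" (a0), "+r" (a1)
		     : "r" (a2), "r" (a6), "r" (a7)
		     : "memory");
	return (long)a0;	/* SBI error code */
}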
10 changes: 1 addition & 9 deletions arch/riscv/kvm/vcpu_sbi_system.c
@@ -13,7 +13,6 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 				    struct kvm_vcpu_sbi_return *retdata)
 {
 	struct kvm_cpu_context *cp = &vcpu->arch.guest_context;
-	struct kvm_cpu_context *reset_cntx;
 	unsigned long funcid = cp->a6;
 	unsigned long hva, i;
 	struct kvm_vcpu *tmp;
@@ -45,14 +44,7 @@ static int kvm_sbi_ext_susp_handler(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		}
 	}
 
-	spin_lock(&vcpu->arch.reset_cntx_lock);
-	reset_cntx = &vcpu->arch.guest_reset_context;
-	reset_cntx->sepc = cp->a1;
-	reset_cntx->a0 = vcpu->vcpu_id;
-	reset_cntx->a1 = cp->a2;
-	spin_unlock(&vcpu->arch.reset_cntx_lock);
-
-	kvm_make_request(KVM_REQ_VCPU_RESET, vcpu);
+	kvm_riscv_vcpu_sbi_request_reset(vcpu, cp->a1, cp->a2);
 
 	/* userspace provides the suspend implementation */
 	kvm_riscv_vcpu_sbi_forward(vcpu, run);
13 changes: 7 additions & 6 deletions arch/riscv/kvm/vcpu_vector.c
@@ -22,6 +22,9 @@ void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
 
 	cntx->sstatus &= ~SR_VS;
+
+	cntx->vector.vlenb = riscv_v_vsize / 32;
+
 	if (riscv_isa_extension_available(isa, v)) {
 		cntx->sstatus |= SR_VS_INITIAL;
 		WARN_ON(!cntx->vector.datap);
@@ -70,13 +73,11 @@ void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
 	__kvm_riscv_vector_restore(cntx);
 }
 
-int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
-					struct kvm_cpu_context *cntx)
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu)
 {
-	cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL);
-	if (!cntx->vector.datap)
+	vcpu->arch.guest_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
+	if (!vcpu->arch.guest_context.vector.datap)
 		return -ENOMEM;
-	cntx->vector.vlenb = riscv_v_vsize / 32;
 
 	vcpu->arch.host_context.vector.datap = kzalloc(riscv_v_vsize, GFP_KERNEL);
 	if (!vcpu->arch.host_context.vector.datap)
@@ -87,7 +88,7 @@ int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
 
 void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
 {
-	kfree(vcpu->arch.guest_reset_context.vector.datap);
+	kfree(vcpu->arch.guest_context.vector.datap);
 	kfree(vcpu->arch.host_context.vector.datap);
 }
 #endif
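A note on the vlenb arithmetic above: riscv_v_vsize is the size of the save area for all 32 vector registers, so one register's length in bytes is riscv_v_vsize / 32. A self-contained check under an assumed VLEN of 128 bits:

/* Illustrative check, assuming VLEN = 128 bits (not from this diff). */
#include <assert.h>

int main(void)
{
	unsigned long vlenb = 128 / 8;			/* 16 bytes per register */
	unsigned long riscv_v_vsize = 32 * vlenb;	/* 512-byte save area */

	assert(riscv_v_vsize / 32 == vlenb);
	return 0;
}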