RISC-V: KVM: Support sstc extension
The Sstc extension allows the guest to program the vstimecmp CSR directly
instead of making an SBI call to the hypervisor to program the next
event. In this case, the timer interrupt is also injected directly into
the guest by the hardware. To maintain backward compatibility, the
hypervisor also updates vstimecmp on an SBI set_time call if the
hardware supports it. Thus, older guest kernels also take advantage
of the sstc extension.

Reviewed-by: Anup Patel <anup@brainfault.org>
Signed-off-by: Atish Patra <atishp@rivosinc.com>
Acked-by: Anup Patel <anup@brainfault.org>
Link: https://lore.kernel.org/all/CAAhSdy2mb6wyqy0NAn9BcTWKMYEc0Z4zU3s3j7oNqBz6eDQ9sg@mail.gmail.com/
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
Atish Patra authored and Palmer Dabbelt committed Aug 12, 2022
1 parent 9801002 commit 8f5cb44
Showing 4 changed files with 153 additions and 7 deletions.
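
Before the diffs, a brief illustration of what Sstc changes from the guest's point of view. This is a hedged sketch, not code from this commit: sstc_available is a hypothetical feature flag, while CSR_STIMECMP, SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, and sbi_ecall() are the constants and helper the kernel's RISC-V code uses for the stimecmp CSR and the SBI TIME extension.

/*
 * Illustrative guest-side sketch (RV64; not part of this commit).
 * With Sstc the guest arms its own timer; without it, it must trap
 * to the hypervisor through the SBI TIME extension.
 */
static void guest_program_next_event(u64 next_cycle)
{
	if (sstc_available) {
		/*
		 * Sstc: no trap; hardware raises the timer interrupt
		 * once time >= stimecmp.
		 */
		csr_write(CSR_STIMECMP, next_cycle);
	} else {
		/* Legacy path: SBI set_timer call into the hypervisor. */
		sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER,
			  next_cycle, 0, 0, 0, 0, 0);
	}
}

The commit below wires up the host side of both paths.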
7 changes: 7 additions & 0 deletions arch/riscv/include/asm/kvm_vcpu_timer.h
@@ -28,6 +28,11 @@ struct kvm_vcpu_timer {
 	u64 next_cycles;
 	/* Underlying hrtimer instance */
 	struct hrtimer hrt;
+
+	/* Flag to check if sstc is enabled or not */
+	bool sstc_enabled;
+	/* A function pointer to switch between stimecmp or hrtimer at runtime */
+	int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
 };
 
 int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
@@ -40,5 +45,7 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
 void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
 void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
 
 #endif
1 change: 1 addition & 0 deletions arch/riscv/include/uapi/asm/kvm.h
@@ -97,6 +97,7 @@ enum KVM_RISCV_ISA_EXT_ID {
 	KVM_RISCV_ISA_EXT_I,
 	KVM_RISCV_ISA_EXT_M,
 	KVM_RISCV_ISA_EXT_SVPBMT,
+	KVM_RISCV_ISA_EXT_SSTC,
 	KVM_RISCV_ISA_EXT_MAX,
 };
 
8 changes: 7 additions & 1 deletion arch/riscv/kvm/vcpu.c
@@ -52,6 +52,7 @@ static const unsigned long kvm_isa_ext_arr[] = {
 	RISCV_ISA_EXT_i,
 	RISCV_ISA_EXT_m,
 	RISCV_ISA_EXT_SVPBMT,
+	RISCV_ISA_EXT_SSTC,
 };
 
 static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext)
@@ -85,6 +86,7 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext)
 	case KVM_RISCV_ISA_EXT_C:
 	case KVM_RISCV_ISA_EXT_I:
 	case KVM_RISCV_ISA_EXT_M:
+	case KVM_RISCV_ISA_EXT_SSTC:
 		return false;
 	default:
 		break;
@@ -203,7 +205,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	return kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER);
+	return kvm_riscv_vcpu_timer_pending(vcpu);
 }
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -785,6 +787,8 @@ static void kvm_riscv_vcpu_update_config(const unsigned long *isa)
 	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SVPBMT))
 		henvcfg |= ENVCFG_PBMTE;
 
+	if (__riscv_isa_extension_available(isa, RISCV_ISA_EXT_SSTC))
+		henvcfg |= ENVCFG_STCE;
 	csr_write(CSR_HENVCFG, henvcfg);
 #ifdef CONFIG_32BIT
 	csr_write(CSR_HENVCFGH, henvcfg >> 32);
@@ -828,6 +832,8 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 					 vcpu->arch.isa);
 	kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context);
 
+	kvm_riscv_vcpu_timer_save(vcpu);
+
 	csr->vsstatus = csr_read(CSR_VSSTATUS);
 	csr->vsie = csr_read(CSR_VSIE);
 	csr->vstvec = csr_read(CSR_VSTVEC);
144 changes: 138 additions & 6 deletions arch/riscv/kvm/vcpu_timer.c
@@ -69,7 +69,18 @@ static int kvm_riscv_vcpu_timer_cancel(struct kvm_vcpu_timer *t)
 	return 0;
 }
 
-int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+static int kvm_riscv_vcpu_update_vstimecmp(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, ncycles & 0xFFFFFFFF);
+	csr_write(CSR_VSTIMECMPH, ncycles >> 32);
+#else
+	csr_write(CSR_VSTIMECMP, ncycles);
+#endif
+	return 0;
+}
+
+static int kvm_riscv_vcpu_update_hrtimer(struct kvm_vcpu *vcpu, u64 ncycles)
 {
 	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
@@ -88,6 +99,65 @@ int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
 	return 0;
 }
 
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	return t->timer_next_event(vcpu, ncycles);
+}
+
+static enum hrtimer_restart kvm_riscv_vcpu_vstimer_expired(struct hrtimer *h)
+{
+	u64 delta_ns;
+	struct kvm_vcpu_timer *t = container_of(h, struct kvm_vcpu_timer, hrt);
+	struct kvm_vcpu *vcpu = container_of(t, struct kvm_vcpu, arch.timer);
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (kvm_riscv_current_cycles(gt) < t->next_cycles) {
+		delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+		hrtimer_forward_now(&t->hrt, ktime_set(0, delta_ns));
+		return HRTIMER_RESTART;
+	}
+
+	t->next_set = false;
+	kvm_vcpu_kick(vcpu);
+
+	return HRTIMER_NORESTART;
+}
+
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+
+	if (!kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t) ||
+	    kvm_riscv_vcpu_has_interrupts(vcpu, 1UL << IRQ_VS_TIMER))
+		return true;
+	else
+		return false;
+}
+
+static void kvm_riscv_vcpu_timer_blocking(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
+	u64 delta_ns;
+
+	if (!t->init_done)
+		return;
+
+	delta_ns = kvm_riscv_delta_cycles2ns(t->next_cycles, gt, t);
+	if (delta_ns) {
+		hrtimer_start(&t->hrt, ktime_set(0, delta_ns), HRTIMER_MODE_REL);
+		t->next_set = true;
+	}
+}
+
+static void kvm_riscv_vcpu_timer_unblocking(struct kvm_vcpu *vcpu)
+{
+	kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
+}
+
 int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
 				 const struct kvm_one_reg *reg)
 {
@@ -180,10 +250,20 @@ int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu)
 		return -EINVAL;
 
 	hrtimer_init(&t->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
 	t->init_done = true;
 	t->next_set = false;
 
+	/* Enable sstc for every vcpu if available in hardware */
+	if (riscv_isa_extension_available(NULL, SSTC)) {
+		t->sstc_enabled = true;
+		t->hrt.function = kvm_riscv_vcpu_vstimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_vstimecmp;
+	} else {
+		t->sstc_enabled = false;
+		t->hrt.function = kvm_riscv_vcpu_hrtimer_expired;
+		t->timer_next_event = kvm_riscv_vcpu_update_hrtimer;
+	}
+
 	return 0;
 }
 
@@ -199,21 +279,73 @@ int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu)
 
 int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu)
 {
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	t->next_cycles = -1ULL;
 	return kvm_riscv_vcpu_timer_cancel(&vcpu->arch.timer);
 }
 
-void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+static void kvm_riscv_vcpu_update_timedelta(struct kvm_vcpu *vcpu)
 {
 	struct kvm_guest_timer *gt = &vcpu->kvm->arch.timer;
 
-#ifdef CONFIG_64BIT
-	csr_write(CSR_HTIMEDELTA, gt->time_delta);
-#else
+#if defined(CONFIG_32BIT)
 	csr_write(CSR_HTIMEDELTA, (u32)(gt->time_delta));
 	csr_write(CSR_HTIMEDELTAH, (u32)(gt->time_delta >> 32));
+#else
+	csr_write(CSR_HTIMEDELTA, gt->time_delta);
 #endif
 }
 
+void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	kvm_riscv_vcpu_update_timedelta(vcpu);
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+#if defined(CONFIG_32BIT)
+	csr_write(CSR_VSTIMECMP, (u32)t->next_cycles);
+	csr_write(CSR_VSTIMECMPH, (u32)(t->next_cycles >> 32));
+#else
+	csr_write(CSR_VSTIMECMP, t->next_cycles);
+#endif
+
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	kvm_riscv_vcpu_timer_unblocking(vcpu);
+}
+
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu)
+{
+	struct kvm_vcpu_csr *csr;
+	struct kvm_vcpu_timer *t = &vcpu->arch.timer;
+
+	if (!t->sstc_enabled)
+		return;
+
+	csr = &vcpu->arch.guest_csr;
+	t = &vcpu->arch.timer;
+#if defined(CONFIG_32BIT)
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+	t->next_cycles |= (u64)csr_read(CSR_VSTIMECMPH) << 32;
+#else
+	t->next_cycles = csr_read(CSR_VSTIMECMP);
+#endif
+	/* timer should be enabled for the remaining operations */
+	if (unlikely(!t->init_done))
+		return;
+
+	if (kvm_vcpu_is_blocking(vcpu))
+		kvm_riscv_vcpu_timer_blocking(vcpu);
+}
+
 void kvm_riscv_guest_timer_init(struct kvm *kvm)
 {
 	struct kvm_guest_timer *gt = &kvm->arch.timer;
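
Tying the diffs back to the commit message: an older guest still makes an SBI set_time call; KVM's SBI TIME handler forwards the requested cycle count to kvm_riscv_vcpu_timer_next_event(), and the function pointer installed in kvm_riscv_vcpu_timer_init() decides at runtime where it lands. A minimal sketch of that dispatch (the handler name here is hypothetical; the real SBI plumbing lives elsewhere in arch/riscv/kvm and differs in detail):

/* Hypothetical condensed handler; shows only the dispatch. */
static void handle_sbi_set_timer(struct kvm_vcpu *vcpu, u64 next_cycle)
{
	/*
	 * On Sstc hardware timer_next_event points at
	 * kvm_riscv_vcpu_update_vstimecmp(), so the legacy SBI call still
	 * programs vstimecmp directly; otherwise it points at
	 * kvm_riscv_vcpu_update_hrtimer() and a host hrtimer kicks the
	 * vcpu as before.
	 */
	kvm_riscv_vcpu_timer_next_event(vcpu, next_cycle);
}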
