KVM: arm/arm64: Move timer IRQ map to latest possible time
We are about to modify the VGIC to allocate all data structures
dynamically and store mapped IRQ information on a per-IRQ struct, which
is indeed allocated dynamically at init time.

Therefore, we cannot record the mapped IRQ info from the timer at timer
reset time, as is done now, because VCPU reset happens before timer
init.

A possible later time to do this is on the first run of each VCPU; it
just requires us to move the enable state to be per-VCPU state and to do
the lookup of the physical IRQ number when we are about to run the VCPU.
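
For reference, a condensed sketch of the resulting flow (taken from the diff
below; the irq_desc walk, the timecounter/workqueue check and error handling
are elided here) looks roughly like this:

/* arch/arm/kvm/arm.c: first run of a VCPU, after the VGIC is initialized */
static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
{
        ...
        if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
                ret = kvm_timer_enable(vcpu);   /* was a per-VM kvm_timer_enable(kvm) */
        return ret;
}

/* virt/kvm/arm/arch_timer.c: now per-VCPU, and fallible */
int kvm_timer_enable(struct kvm_vcpu *vcpu)
{
        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;

        if (timer->enabled)
                return 0;               /* already mapped on an earlier run */

        /* resolve host_vtimer_irq to its hardware IRQ and hand it to the VGIC */
        ...
        ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
        if (ret)
                return ret;

        timer->enabled = 1;             /* per-VCPU flag, checked by flush/sync and hyp code */
        return 0;
}

Moving the enabled flag from struct arch_timer_kvm to struct arch_timer_cpu is
what allows the mapping to be deferred until each VCPU first runs.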

Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Christoffer Dall committed May 20, 2016
1 parent c8eb3f6 commit 41a5448
Showing 4 changed files with 47 additions and 38 deletions.
6 changes: 3 additions & 3 deletions arch/arm/kvm/arm.c
@@ -459,7 +459,7 @@ static void update_vttbr(struct kvm *kvm)
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 {
         struct kvm *kvm = vcpu->kvm;
-        int ret;
+        int ret = 0;
 
         if (likely(vcpu->arch.has_run_once))
                 return 0;
@@ -482,9 +482,9 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
          * interrupts from the virtual timer with a userspace gic.
          */
         if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
-                kvm_timer_enable(kvm);
+                ret = kvm_timer_enable(vcpu);
 
-        return 0;
+        return ret;
 }
 
 bool kvm_arch_intc_initialized(struct kvm *kvm)
8 changes: 4 additions & 4 deletions include/kvm/arm_arch_timer.h
@@ -24,9 +24,6 @@
 #include <linux/workqueue.h>
 
 struct arch_timer_kvm {
-        /* Is the timer enabled */
-        bool enabled;
-
         /* Virtual offset */
         cycle_t cntvoff;
 };
@@ -55,10 +52,13 @@ struct arch_timer_cpu {
 
         /* Active IRQ state caching */
         bool active_cleared_last;
+
+        /* Is the timer enabled */
+        bool enabled;
 };
 
 int kvm_timer_hyp_init(void);
-void kvm_timer_enable(struct kvm *kvm);
+int kvm_timer_enable(struct kvm_vcpu *vcpu);
 void kvm_timer_init(struct kvm *kvm);
 int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                          const struct kvm_irq_level *irq);
66 changes: 38 additions & 28 deletions virt/kvm/arm/arch_timer.c
@@ -197,7 +197,7 @@ static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
          * because the guest would never see the interrupt. Instead wait
          * until we call this function from kvm_timer_flush_hwstate.
          */
-        if (!vgic_initialized(vcpu->kvm))
+        if (!vgic_initialized(vcpu->kvm) || !timer->enabled)
                 return -ENODEV;
 
         if (kvm_timer_should_fire(vcpu) != timer->irq.level)
@@ -333,9 +333,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
                          const struct kvm_irq_level *irq)
 {
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
-        struct irq_desc *desc;
-        struct irq_data *data;
-        int phys_irq;
 
         /*
          * The vcpu timer irq number cannot be determined in
@@ -354,26 +351,7 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
         timer->cntv_ctl = 0;
         kvm_timer_update_state(vcpu);
 
-        /*
-         * Find the physical IRQ number corresponding to the host_vtimer_irq
-         */
-        desc = irq_to_desc(host_vtimer_irq);
-        if (!desc) {
-                kvm_err("%s: no interrupt descriptor\n", __func__);
-                return -EINVAL;
-        }
-
-        data = irq_desc_get_irq_data(desc);
-        while (data->parent_data)
-                data = data->parent_data;
-
-        phys_irq = data->hwirq;
-
-        /*
-         * Tell the VGIC that the virtual interrupt is tied to a
-         * physical interrupt. We do that once per VCPU.
-         */
-        return kvm_vgic_map_phys_irq(vcpu, irq->irq, phys_irq);
+        return 0;
 }
 
 void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu)
@@ -501,10 +479,40 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu)
         kvm_vgic_unmap_phys_irq(vcpu, timer->irq.irq);
 }
 
-void kvm_timer_enable(struct kvm *kvm)
+int kvm_timer_enable(struct kvm_vcpu *vcpu)
 {
-        if (kvm->arch.timer.enabled)
-                return;
+        struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
+        struct irq_desc *desc;
+        struct irq_data *data;
+        int phys_irq;
+        int ret;
+
+        if (timer->enabled)
+                return 0;
+
+        /*
+         * Find the physical IRQ number corresponding to the host_vtimer_irq
+         */
+        desc = irq_to_desc(host_vtimer_irq);
+        if (!desc) {
+                kvm_err("%s: no interrupt descriptor\n", __func__);
+                return -EINVAL;
+        }
+
+        data = irq_desc_get_irq_data(desc);
+        while (data->parent_data)
+                data = data->parent_data;
+
+        phys_irq = data->hwirq;
+
+        /*
+         * Tell the VGIC that the virtual interrupt is tied to a
+         * physical interrupt. We do that once per VCPU.
+         */
+        ret = kvm_vgic_map_phys_irq(vcpu, timer->irq.irq, phys_irq);
+        if (ret)
+                return ret;
+
 
         /*
          * There is a potential race here between VCPUs starting for the first
@@ -515,7 +523,9 @@ void kvm_timer_enable(struct kvm *kvm)
          * the arch timers are enabled.
          */
         if (timecounter && wqueue)
-                kvm->arch.timer.enabled = 1;
+                timer->enabled = 1;
+
+        return 0;
 }
 
 void kvm_timer_init(struct kvm *kvm)
5 changes: 2 additions & 3 deletions virt/kvm/arm/hyp/timer-sr.c
@@ -24,11 +24,10 @@
 /* vcpu is already in the HYP VA space */
 void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
 {
-        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
         struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
         u64 val;
 
-        if (kvm->arch.timer.enabled) {
+        if (timer->enabled) {
                 timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
                 timer->cntv_cval = read_sysreg_el0(cntv_cval);
         }
@@ -60,7 +59,7 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
         val |= CNTHCTL_EL1PCTEN;
         write_sysreg(val, cnthctl_el2);
 
-        if (kvm->arch.timer.enabled) {
+        if (timer->enabled) {
                 write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
                 write_sysreg_el0(timer->cntv_cval, cntv_cval);
                 isb();
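Worth noting about the hunks above: because the enabled flag now lives in the
per-VCPU struct arch_timer_cpu, the HYP-mode save path can test it directly as
timer->enabled through the vcpu pointer, which is presumably why the
kern_hyp_va(vcpu->kvm) translation is dropped from __timer_save_state(); the
restore path still uses kvm for the cntvoff write.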
