From f2e600c149fda3453344f89c7e9353fe278ebd32 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 18 Oct 2017 13:06:25 +0200 Subject: [PATCH 01/26] arm64: Implement arch_counter_get_cntpct to read the physical counter As we are about to use the physical counter on arm64 systems that have KVM support, implement arch_counter_get_cntpct() and the associated errata workaround functionality for stable timer reads. Cc: Will Deacon Cc: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm64/include/asm/arch_timer.h | 8 +++----- drivers/clocksource/arm_arch_timer.c | 23 +++++++++++++++++++++++ 2 files changed, 26 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h index a652ce0a5cb2c..04275de614dbd 100644 --- a/arch/arm64/include/asm/arch_timer.h +++ b/arch/arm64/include/asm/arch_timer.h @@ -52,6 +52,7 @@ struct arch_timer_erratum_workaround { const char *desc; u32 (*read_cntp_tval_el0)(void); u32 (*read_cntv_tval_el0)(void); + u64 (*read_cntpct_el0)(void); u64 (*read_cntvct_el0)(void); int (*set_next_event_phys)(unsigned long, struct clock_event_device *); int (*set_next_event_virt)(unsigned long, struct clock_event_device *); @@ -148,11 +149,8 @@ static inline void arch_timer_set_cntkctl(u32 cntkctl) static inline u64 arch_counter_get_cntpct(void) { - /* - * AArch64 kernel and user space mandate the use of CNTVCT. - */ - BUG(); - return 0; + isb(); + return arch_timer_reg_read_stable(cntpct_el0); } static inline u64 arch_counter_get_cntvct(void) diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fd4b7f684bd03..ff8f8a1771560 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -217,6 +217,11 @@ static u32 notrace fsl_a008585_read_cntv_tval_el0(void) return __fsl_a008585_read_reg(cntv_tval_el0); } +static u64 notrace fsl_a008585_read_cntpct_el0(void) +{ + return __fsl_a008585_read_reg(cntpct_el0); +} + static u64 notrace fsl_a008585_read_cntvct_el0(void) { return __fsl_a008585_read_reg(cntvct_el0); @@ -258,6 +263,11 @@ static u32 notrace hisi_161010101_read_cntv_tval_el0(void) return __hisi_161010101_read_reg(cntv_tval_el0); } +static u64 notrace hisi_161010101_read_cntpct_el0(void) +{ + return __hisi_161010101_read_reg(cntpct_el0); +} + static u64 notrace hisi_161010101_read_cntvct_el0(void) { return __hisi_161010101_read_reg(cntvct_el0); @@ -288,6 +298,15 @@ static struct ate_acpi_oem_info hisi_161010101_oem_info[] = { #endif #ifdef CONFIG_ARM64_ERRATUM_858921 +static u64 notrace arm64_858921_read_cntpct_el0(void) +{ + u64 old, new; + + old = read_sysreg(cntpct_el0); + new = read_sysreg(cntpct_el0); + return (((old ^ new) >> 32) & 1) ? 
old : new; +} + static u64 notrace arm64_858921_read_cntvct_el0(void) { u64 old, new; @@ -346,6 +365,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .desc = "Freescale erratum a005858", .read_cntp_tval_el0 = fsl_a008585_read_cntp_tval_el0, .read_cntv_tval_el0 = fsl_a008585_read_cntv_tval_el0, + .read_cntpct_el0 = fsl_a008585_read_cntpct_el0, .read_cntvct_el0 = fsl_a008585_read_cntvct_el0, .set_next_event_phys = erratum_set_next_event_tval_phys, .set_next_event_virt = erratum_set_next_event_tval_virt, @@ -358,6 +378,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .desc = "HiSilicon erratum 161010101", .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, + .read_cntpct_el0 = hisi_161010101_read_cntpct_el0, .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, .set_next_event_phys = erratum_set_next_event_tval_phys, .set_next_event_virt = erratum_set_next_event_tval_virt, @@ -368,6 +389,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .desc = "HiSilicon erratum 161010101", .read_cntp_tval_el0 = hisi_161010101_read_cntp_tval_el0, .read_cntv_tval_el0 = hisi_161010101_read_cntv_tval_el0, + .read_cntpct_el0 = hisi_161010101_read_cntpct_el0, .read_cntvct_el0 = hisi_161010101_read_cntvct_el0, .set_next_event_phys = erratum_set_next_event_tval_phys, .set_next_event_virt = erratum_set_next_event_tval_virt, @@ -378,6 +400,7 @@ static const struct arch_timer_erratum_workaround ool_workarounds[] = { .match_type = ate_match_local_cap_id, .id = (void *)ARM64_WORKAROUND_858921, .desc = "ARM erratum 858921", + .read_cntpct_el0 = arm64_858921_read_cntpct_el0, .read_cntvct_el0 = arm64_858921_read_cntvct_el0, }, #endif From e6d68b00e989f27116fd8575f1f9c217873e9b0e Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 5 Jul 2017 11:04:28 +0200 Subject: [PATCH 02/26] arm64: Use physical counter for in-kernel reads when booted in EL2 Using the physical counter allows KVM to retain the offset between the virtual and physical counter as long as it is actively running a VCPU. As soon as a VCPU is released, another thread is scheduled or we start running userspace applications, we reset the offset to 0, so that userspace accessing the virtual timer can still read the virtual counter and get the same view of time as the kernel. This opens up potential improvements for KVM performance, but we have to make a few adjustments to preserve system consistency. Currently get_cycles() is hardwired to arch_counter_get_cntvct() on arm64, but as we move to using the physical timer for the in-kernel time-keeping on systems that boot in EL2, we should use the same counter for get_cycles() as for other in-kernel timekeeping operations. Similarly, implementations of arch_timer_set_next_event_phys() is modified to use the counter specific to the timer being programmed. VHE kernels or kernels continuing to use the virtual timer are unaffected. 
Cc: Will Deacon Cc: Mark Rutland Acked-by: Catalin Marinas Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm64/include/asm/timex.h | 2 +- drivers/clocksource/arm_arch_timer.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/arch/arm64/include/asm/timex.h b/arch/arm64/include/asm/timex.h index 81a076eb37faf..9ad60bae5c8da 100644 --- a/arch/arm64/include/asm/timex.h +++ b/arch/arm64/include/asm/timex.h @@ -22,7 +22,7 @@ * Use the current timer as a cycle counter since this is what we use for * the delay loop. */ -#define get_cycles() arch_counter_get_cntvct() +#define get_cycles() arch_timer_read_counter() #include diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index ff8f8a1771560..061476e92db72 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -158,6 +158,7 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg, * if we don't have the cp15 accessors we won't have a problem. */ u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; +EXPORT_SYMBOL_GPL(arch_timer_read_counter); static u64 arch_counter_read(struct clocksource *cs) { @@ -329,16 +330,19 @@ static void erratum_set_next_event_tval_generic(const int access, unsigned long struct clock_event_device *clk) { unsigned long ctrl; - u64 cval = evt + arch_counter_get_cntvct(); + u64 cval; ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk); ctrl |= ARCH_TIMER_CTRL_ENABLE; ctrl &= ~ARCH_TIMER_CTRL_IT_MASK; - if (access == ARCH_TIMER_PHYS_ACCESS) + if (access == ARCH_TIMER_PHYS_ACCESS) { + cval = evt + arch_counter_get_cntpct(); write_sysreg(cval, cntp_cval_el0); - else + } else { + cval = evt + arch_counter_get_cntvct(); write_sysreg(cval, cntv_cval_el0); + } arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk); } @@ -913,7 +917,7 @@ static void __init arch_counter_register(unsigned type) /* Register the CP15 based counter if we have one */ if (type & ARCH_TIMER_TYPE_CP15) { - if (IS_ENABLED(CONFIG_ARM64) || + if ((IS_ENABLED(CONFIG_ARM64) && !is_hyp_mode_available()) || arch_timer_uses_ppi == ARCH_TIMER_VIRT_PPI) arch_timer_read_counter = arch_counter_get_cntvct; else From f39d16cbabf9f939745a3850a33760910d22ef35 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 19 Oct 2016 12:40:17 +0200 Subject: [PATCH 03/26] KVM: arm/arm64: Guard kvm_vgic_map_is_active against !vgic_initialized If the vgic is not initialized, don't try to grab its spinlocks or traverse its data structures. This is important because we soon have to start considering the active state of virtual interrupts when doing vcpu_load, which may happen early on before the vgic is initialized.
Signed-off-by: Christoffer Dall Acked-by: Marc Zyngier --- virt/kvm/arm/vgic/vgic.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index fed717e07938f..e1f7dbcfece03 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -777,6 +777,9 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq) struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); bool map_is_active; + if (!vgic_initialized(vcpu->kvm)) + return false; + spin_lock(&irq->irq_lock); map_is_active = irq->hw && irq->active; spin_unlock(&irq->irq_lock); From 006df0f34930e18d0aa52f05705bdfe1fc565943 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 16 Oct 2016 22:19:11 +0200 Subject: [PATCH 04/26] KVM: arm/arm64: Support calling vgic_update_irq_pending from irq context We are about to optimize our timer handling logic which involves injecting irqs to the vgic directly from the irq handler. Unfortunately, the injection path can take any AP list lock and irq lock and we must therefore make sure to use spin_lock_irqsave wherever interrupts are enabled and we are taking any of those locks, to avoid deadlocking between process context and the ISR. This changes a lot of the VGIC code, but the good news is that the changes are mostly mechanical. Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-its.c | 17 +++++---- virt/kvm/arm/vgic/vgic-mmio-v2.c | 22 +++++++----- virt/kvm/arm/vgic/vgic-mmio-v3.c | 17 +++++---- virt/kvm/arm/vgic/vgic-mmio.c | 44 ++++++++++++++---------- virt/kvm/arm/vgic/vgic-v2.c | 5 +-- virt/kvm/arm/vgic/vgic-v3.c | 12 ++++--- virt/kvm/arm/vgic/vgic.c | 59 +++++++++++++++++++------------- virt/kvm/arm/vgic/vgic.h | 3 +- 8 files changed, 107 insertions(+), 72 deletions(-) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index f51c1e1b3f70f..9f5e347676284 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -278,6 +278,7 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, u64 propbase = GICR_PROPBASER_ADDRESS(kvm->arch.vgic.propbaser); u8 prop; int ret; + unsigned long flags; ret = kvm_read_guest(kvm, propbase + irq->intid - GIC_LPI_OFFSET, &prop, 1); @@ -285,15 +286,15 @@ static int update_lpi_config(struct kvm *kvm, struct vgic_irq *irq, if (ret) return ret; - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); if (!filter_vcpu || filter_vcpu == irq->target_vcpu) { irq->priority = LPI_PROP_PRIORITY(prop); irq->enabled = LPI_PROP_ENABLE_BIT(prop); - vgic_queue_irq_unlock(kvm, irq); + vgic_queue_irq_unlock(kvm, irq, flags); } else { - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); } return 0; @@ -393,6 +394,7 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) int ret = 0; u32 *intids; int nr_irqs, i; + unsigned long flags; nr_irqs = vgic_copy_lpi_list(vcpu, &intids); if (nr_irqs < 0) @@ -420,9 +422,9 @@ static int its_sync_lpi_pending_table(struct kvm_vcpu *vcpu) } irq = vgic_get_irq(vcpu->kvm, NULL, intids[i]); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = pendmask & (1U << bit_nr); - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -515,6 +517,7 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, { struct kvm_vcpu *vcpu; struct its_ite *ite; + unsigned long flags; if (!its->enabled) return -EBUSY; @@
-530,9 +533,9 @@ static int vgic_its_trigger_msi(struct kvm *kvm, struct vgic_its *its, if (!vcpu->arch.vgic_cpu.lpis_enabled) return -EBUSY; - spin_lock(&ite->irq->irq_lock); + spin_lock_irqsave(&ite->irq->irq_lock, flags); ite->irq->pending_latch = true; - vgic_queue_irq_unlock(kvm, ite->irq); + vgic_queue_irq_unlock(kvm, ite->irq, flags); return 0; } diff --git a/virt/kvm/arm/vgic/vgic-mmio-v2.c b/virt/kvm/arm/vgic/vgic-mmio-v2.c index b3d4a10f09a11..e21e2f49b0052 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v2.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v2.c @@ -74,6 +74,7 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, int mode = (val >> 24) & 0x03; int c; struct kvm_vcpu *vcpu; + unsigned long flags; switch (mode) { case 0x0: /* as specified by targets */ @@ -97,11 +98,11 @@ static void vgic_mmio_write_sgir(struct kvm_vcpu *source_vcpu, irq = vgic_get_irq(source_vcpu->kvm, vcpu, intid); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = true; irq->source |= 1U << source_vcpu->vcpu_id; - vgic_queue_irq_unlock(source_vcpu->kvm, irq); + vgic_queue_irq_unlock(source_vcpu->kvm, irq, flags); vgic_put_irq(source_vcpu->kvm, irq); } } @@ -131,6 +132,7 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, u32 intid = VGIC_ADDR_TO_INTID(addr, 8); u8 cpu_mask = GENMASK(atomic_read(&vcpu->kvm->online_vcpus) - 1, 0); int i; + unsigned long flags; /* GICD_ITARGETSR[0-7] are read-only */ if (intid < VGIC_NR_PRIVATE_IRQS) @@ -140,13 +142,13 @@ static void vgic_mmio_write_target(struct kvm_vcpu *vcpu, struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, NULL, intid + i); int target; - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->targets = (val >> (i * 8)) & cpu_mask; target = irq->targets ? __ffs(irq->targets) : 0; irq->target_vcpu = kvm_get_vcpu(vcpu->kvm, target); - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -174,17 +176,18 @@ static void vgic_mmio_write_sgipendc(struct kvm_vcpu *vcpu, { u32 intid = addr & 0x0f; int i; + unsigned long flags; for (i = 0; i < len; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->source &= ~((val >> (i * 8)) & 0xff); if (!irq->source) irq->pending_latch = false; - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -195,19 +198,20 @@ static void vgic_mmio_write_sgipends(struct kvm_vcpu *vcpu, { u32 intid = addr & 0x0f; int i; + unsigned long flags; for (i = 0; i < len; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->source |= (val >> (i * 8)) & 0xff; if (irq->source) { irq->pending_latch = true; - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); } else { - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); } vgic_put_irq(vcpu->kvm, irq); } diff --git a/virt/kvm/arm/vgic/vgic-mmio-v3.c b/virt/kvm/arm/vgic/vgic-mmio-v3.c index 408ef06638fc6..83786108829e3 100644 --- a/virt/kvm/arm/vgic/vgic-mmio-v3.c +++ b/virt/kvm/arm/vgic/vgic-mmio-v3.c @@ -129,6 +129,7 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, { int intid = VGIC_ADDR_TO_INTID(addr, 64); struct vgic_irq *irq; + unsigned long flags; /* The upper word is WI for us since we don't implement Aff3. 
*/ if (addr & 4) @@ -139,13 +140,13 @@ static void vgic_mmio_write_irouter(struct kvm_vcpu *vcpu, if (!irq) return; - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); /* We only care about and preserve Aff0, Aff1 and Aff2. */ irq->mpidr = val & GENMASK(23, 0); irq->target_vcpu = kvm_mpidr_to_vcpu(vcpu->kvm, irq->mpidr); - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -241,11 +242,12 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); int i; + unsigned long flags; for (i = 0; i < len * 8; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); if (test_bit(i, &val)) { /* * pending_latch is set irrespective of irq type @@ -253,10 +255,10 @@ static void vgic_v3_uaccess_write_pending(struct kvm_vcpu *vcpu, * restore irq config before pending info. */ irq->pending_latch = true; - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); } else { irq->pending_latch = false; - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); } vgic_put_irq(vcpu->kvm, irq); @@ -799,6 +801,7 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) int sgi, c; int vcpu_id = vcpu->vcpu_id; bool broadcast; + unsigned long flags; sgi = (reg & ICC_SGI1R_SGI_ID_MASK) >> ICC_SGI1R_SGI_ID_SHIFT; broadcast = reg & BIT_ULL(ICC_SGI1R_IRQ_ROUTING_MODE_BIT); @@ -837,10 +840,10 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg) irq = vgic_get_irq(vcpu->kvm, c_vcpu, sgi); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = true; - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); vgic_put_irq(vcpu->kvm, irq); } } diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c index c1e4bdd66131e..deb51ee16a3da 100644 --- a/virt/kvm/arm/vgic/vgic-mmio.c +++ b/virt/kvm/arm/vgic/vgic-mmio.c @@ -69,13 +69,14 @@ void vgic_mmio_write_senable(struct kvm_vcpu *vcpu, { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); int i; + unsigned long flags; for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->enabled = true; - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -87,15 +88,16 @@ void vgic_mmio_write_cenable(struct kvm_vcpu *vcpu, { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); int i; + unsigned long flags; for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->enabled = false; - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -126,14 +128,15 @@ void vgic_mmio_write_spending(struct kvm_vcpu *vcpu, { u32 intid = VGIC_ADDR_TO_INTID(addr, 1); int i; + unsigned long flags; for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = true; - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -144,15 +147,16 @@ void vgic_mmio_write_cpending(struct kvm_vcpu *vcpu, { u32 intid = 
VGIC_ADDR_TO_INTID(addr, 1); int i; + unsigned long flags; for_each_set_bit(i, &val, len * 8) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->pending_latch = false; - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -181,7 +185,8 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, bool new_active_state) { struct kvm_vcpu *requester_vcpu; - spin_lock(&irq->irq_lock); + unsigned long flags; + spin_lock_irqsave(&irq->irq_lock, flags); /* * The vcpu parameter here can mean multiple things depending on how @@ -216,9 +221,9 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, irq->active = new_active_state; if (new_active_state) - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); else - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); } /* @@ -352,14 +357,15 @@ void vgic_mmio_write_priority(struct kvm_vcpu *vcpu, { u32 intid = VGIC_ADDR_TO_INTID(addr, 8); int i; + unsigned long flags; for (i = 0; i < len; i++) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); /* Narrow the priority range to what we actually support */ irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -390,6 +396,7 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, { u32 intid = VGIC_ADDR_TO_INTID(addr, 2); int i; + unsigned long flags; for (i = 0; i < len * 4; i++) { struct vgic_irq *irq; @@ -404,14 +411,14 @@ void vgic_mmio_write_config(struct kvm_vcpu *vcpu, continue; irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); if (test_bit(i * 2 + 1, &val)) irq->config = VGIC_CONFIG_EDGE; else irq->config = VGIC_CONFIG_LEVEL; - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } } @@ -443,6 +450,7 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, { int i; int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; + unsigned long flags; for (i = 0; i < 32; i++) { struct vgic_irq *irq; @@ -459,12 +467,12 @@ void vgic_write_irq_line_level_info(struct kvm_vcpu *vcpu, u32 intid, * restore irq config before line level. 
*/ new_level = !!(val & (1U << i)); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->line_level = new_level; if (new_level) - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); else - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c index e4187e52bb26e..80897102da26c 100644 --- a/virt/kvm/arm/vgic/vgic-v2.c +++ b/virt/kvm/arm/vgic/vgic-v2.c @@ -62,6 +62,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2; int lr; + unsigned long flags; cpuif->vgic_hcr &= ~GICH_HCR_UIE; @@ -77,7 +78,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) irq = vgic_get_irq(vcpu->kvm, vcpu, intid); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); /* Always preserve the active bit */ irq->active = !!(val & GICH_LR_ACTIVE_BIT); @@ -104,7 +105,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu) irq->pending_latch = false; } - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c index 96ea597db0e77..863351c090d8f 100644 --- a/virt/kvm/arm/vgic/vgic-v3.c +++ b/virt/kvm/arm/vgic/vgic-v3.c @@ -44,6 +44,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3; u32 model = vcpu->kvm->arch.vgic.vgic_model; int lr; + unsigned long flags; cpuif->vgic_hcr &= ~ICH_HCR_UIE; @@ -66,7 +67,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) if (!irq) /* An LPI could have been unmapped. */ continue; - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); /* Always preserve the active bit */ irq->active = !!(val & ICH_LR_ACTIVE_BIT); @@ -94,7 +95,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu) irq->pending_latch = false; } - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); } @@ -278,6 +279,7 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq) bool status; u8 val; int ret; + unsigned long flags; retry: vcpu = irq->target_vcpu; @@ -296,13 +298,13 @@ int vgic_v3_lpi_sync_pending_status(struct kvm *kvm, struct vgic_irq *irq) status = val & (1 << bit_nr); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); if (irq->target_vcpu != vcpu) { - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); goto retry; } irq->pending_latch = status; - vgic_queue_irq_unlock(vcpu->kvm, irq); + vgic_queue_irq_unlock(vcpu->kvm, irq, flags); if (status) { /* clear consumed data */ diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index e1f7dbcfece03..e54ef2fdf73dd 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c @@ -53,6 +53,10 @@ struct vgic_global kvm_vgic_global_state __ro_after_init = { * vcpuX->vcpu_id < vcpuY->vcpu_id: * spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock); * spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock); + * + * Since the VGIC must support injecting virtual interrupts from ISRs, we have + * to use the spin_lock_irqsave/spin_unlock_irqrestore versions of outer + * spinlocks for any lock that may be taken while injecting an interrupt. 
*/ /* @@ -261,7 +265,8 @@ static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owne * Needs to be entered with the IRQ lock already held, but will return * with all locks dropped. */ -bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) +bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, + unsigned long flags) { struct kvm_vcpu *vcpu; @@ -279,7 +284,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) * not need to be inserted into an ap_list and there is also * no more work for us to do. */ - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); /* * We have to kick the VCPU here, because we could be @@ -301,11 +306,11 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) * We must unlock the irq lock to take the ap_list_lock where * we are going to insert this new pending interrupt. */ - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); /* someone can do stuff here, which we re-check below */ - spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); + spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags); spin_lock(&irq->irq_lock); /* @@ -322,9 +327,9 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { spin_unlock(&irq->irq_lock); - spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); + spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); goto retry; } @@ -337,7 +342,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq) irq->vcpu = vcpu; spin_unlock(&irq->irq_lock); - spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); + spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags); kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu); kvm_vcpu_kick(vcpu); @@ -367,6 +372,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, { struct kvm_vcpu *vcpu; struct vgic_irq *irq; + unsigned long flags; int ret; trace_vgic_update_irq_pending(cpuid, intid, level); @@ -383,11 +389,11 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, if (!irq) return -EINVAL; - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); if (!vgic_validate_injection(irq, level, owner)) { /* Nothing to see here, move along... 
*/ - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(kvm, irq); return 0; } @@ -397,7 +403,7 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, else irq->pending_latch = true; - vgic_queue_irq_unlock(kvm, irq); + vgic_queue_irq_unlock(kvm, irq, flags); vgic_put_irq(kvm, irq); return 0; @@ -406,15 +412,16 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid, int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); + unsigned long flags; BUG_ON(!irq); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->hw = true; irq->hwintid = phys_irq; - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); return 0; @@ -423,6 +430,7 @@ int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, u32 virt_irq, u32 phys_irq) int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq) { struct vgic_irq *irq; + unsigned long flags; if (!vgic_initialized(vcpu->kvm)) return -EAGAIN; @@ -430,12 +438,12 @@ int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int virt_irq) irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); BUG_ON(!irq); - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); irq->hw = false; irq->hwintid = 0; - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); return 0; @@ -486,9 +494,10 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) { struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_irq *irq, *tmp; + unsigned long flags; retry: - spin_lock(&vgic_cpu->ap_list_lock); + spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB; @@ -528,7 +537,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) /* This interrupt looks like it has to be migrated. 
*/ spin_unlock(&irq->irq_lock); - spin_unlock(&vgic_cpu->ap_list_lock); + spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); /* * Ensure locking order by always locking the smallest @@ -542,7 +551,7 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) vcpuB = vcpu; } - spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock); + spin_lock_irqsave(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock, SINGLE_DEPTH_NESTING); spin_lock(&irq->irq_lock); @@ -566,11 +575,11 @@ static void vgic_prune_ap_list(struct kvm_vcpu *vcpu) spin_unlock(&irq->irq_lock); spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock); - spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock); + spin_unlock_irqrestore(&vcpuA->arch.vgic_cpu.ap_list_lock, flags); goto retry; } - spin_unlock(&vgic_cpu->ap_list_lock); + spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); } static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) @@ -703,6 +712,8 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu) if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) return; + DEBUG_SPINLOCK_BUG_ON(!irqs_disabled()); + spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock); vgic_flush_lr_state(vcpu); spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock); @@ -735,11 +746,12 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; struct vgic_irq *irq; bool pending = false; + unsigned long flags; if (!vcpu->kvm->arch.vgic.enabled) return false; - spin_lock(&vgic_cpu->ap_list_lock); + spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags); list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { spin_lock(&irq->irq_lock); @@ -750,7 +762,7 @@ int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu) break; } - spin_unlock(&vgic_cpu->ap_list_lock); + spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags); return pending; } @@ -776,13 +788,14 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq) { struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, virt_irq); bool map_is_active; + unsigned long flags; if (!vgic_initialized(vcpu->kvm)) return false; - spin_lock(&irq->irq_lock); + spin_lock_irqsave(&irq->irq_lock, flags); map_is_active = irq->hw && irq->active; - spin_unlock(&irq->irq_lock); + spin_unlock_irqrestore(&irq->irq_lock, flags); vgic_put_irq(vcpu->kvm, irq); return map_is_active; diff --git a/virt/kvm/arm/vgic/vgic.h b/virt/kvm/arm/vgic/vgic.h index bf9ceab67c770..4f8aecb07ae6f 100644 --- a/virt/kvm/arm/vgic/vgic.h +++ b/virt/kvm/arm/vgic/vgic.h @@ -140,7 +140,8 @@ vgic_get_mmio_region(struct kvm_vcpu *vcpu, struct vgic_io_device *iodev, struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, u32 intid); void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq); -bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq); +bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, + unsigned long flags); void vgic_kick_vcpus(struct kvm *kvm); int vgic_check_ioaddr(struct kvm *kvm, phys_addr_t *ioaddr, From d33a3c8c48c3264419a683885a27a5c85df35f12 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 6 Dec 2016 22:00:52 +0100 Subject: [PATCH 05/26] KVM: arm/arm64: Check that system supports split eoi/deactivate Some systems without proper firmware and/or hardware description data don't support the split EOI and deactivate operation. 
On such systems, we cannot leave the physical interrupt active after the timer handler on the host has run, so we cannot support KVM with an in-kernel GIC with the timer changes we are about to introduce. This patch makes sure that trying to initialize the KVM GIC code will fail on such systems. Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- drivers/irqchip/irq-gic-v3.c | 8 ++++++-- drivers/irqchip/irq-gic.c | 6 ++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index b5df99c6f680f..854334a6f2254 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1228,7 +1228,9 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare goto out_unmap_rdist; gic_populate_ppi_partitions(node); - gic_of_setup_kvm_info(node); + + if (static_key_true(&supports_deactivate)) + gic_of_setup_kvm_info(node); return 0; out_unmap_rdist: @@ -1517,7 +1519,9 @@ gic_acpi_init(struct acpi_subtable_header *header, const unsigned long end) goto out_fwhandle_free; acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); - gic_acpi_setup_kvm_info(); + + if (static_key_true(&supports_deactivate)) + gic_acpi_setup_kvm_info(); return 0; diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 651d726e8b123..cd9371b749c2e 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1367,7 +1367,8 @@ static void __init gic_of_setup_kvm_info(struct device_node *node) if (ret) return; - gic_set_kvm_info(&gic_v2_kvm_info); + if (static_key_true(&supports_deactivate)) + gic_set_kvm_info(&gic_v2_kvm_info); } int __init @@ -1599,7 +1600,8 @@ static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) gicv2m_init(NULL, gic_data[0].domain); - gic_acpi_setup_kvm_info(); + if (static_key_true(&supports_deactivate)) + gic_acpi_setup_kvm_info(); return 0; } From 8409a06f2a2c0baeb6e6ff020b2c5a4592b3078d Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sat, 17 Jun 2017 01:09:19 -0700 Subject: [PATCH 06/26] KVM: arm/arm64: Make timer_arm and timer_disarm helpers more generic We are about to add an additional soft timer to the arch timer state for a VCPU and would like to be able to reuse the functions to program and cancel a timer, so we make them slightly more generic and rename them to make it more clear that these functions work on soft timers and not the hardware resource that this code is managing. The armed flag on the timer state is only used to assert a condition, and we don't rely on this assertion in any meaningful way, so we can simply get rid of this flag and slightly reduce complexity.
Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- include/kvm/arm_arch_timer.h | 3 --- virt/kvm/arm/arch_timer.c | 33 +++++++++++---------------------- 2 files changed, 11 insertions(+), 25 deletions(-) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index f0053f884b4a2..d0beae98f755e 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -48,9 +48,6 @@ struct arch_timer_cpu { /* Work queued with the above timer expires */ struct work_struct expired; - /* Background timer active */ - bool armed; - /* Is the timer enabled */ bool enabled; }; diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 8e89d63005c7f..2232301911953 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -56,26 +56,16 @@ u64 kvm_phys_timer_read(void) return timecounter->cc->read(timecounter->cc); } -static bool timer_is_armed(struct arch_timer_cpu *timer) +static void soft_timer_start(struct hrtimer *hrt, u64 ns) { - return timer->armed; -} - -/* timer_arm: as in "arm the timer", not as in ARM the company */ -static void timer_arm(struct arch_timer_cpu *timer, u64 ns) -{ - timer->armed = true; - hrtimer_start(&timer->timer, ktime_add_ns(ktime_get(), ns), + hrtimer_start(hrt, ktime_add_ns(ktime_get(), ns), HRTIMER_MODE_ABS); } -static void timer_disarm(struct arch_timer_cpu *timer) +static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work) { - if (timer_is_armed(timer)) { - hrtimer_cancel(&timer->timer); - cancel_work_sync(&timer->expired); - timer->armed = false; - } + hrtimer_cancel(hrt); + cancel_work_sync(work); } static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) @@ -271,7 +261,7 @@ static void kvm_timer_emulate(struct kvm_vcpu *vcpu, return; /* The timer has not yet expired, schedule a background timer */ - timer_arm(timer, kvm_timer_compute_delta(timer_ctx)); + soft_timer_start(&timer->timer, kvm_timer_compute_delta(timer_ctx)); } /* @@ -285,8 +275,6 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); - BUG_ON(timer_is_armed(timer)); - /* * No need to schedule a background timer if any guest timer has * already expired, because kvm_vcpu_block will return before putting @@ -306,13 +294,14 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) * The guest timers have not yet expired, schedule a background timer. * Set the earliest expiration time among the guest timers. */ - timer_arm(timer, kvm_timer_earliest_exp(vcpu)); + soft_timer_start(&timer->timer, kvm_timer_earliest_exp(vcpu)); } void kvm_timer_unschedule(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - timer_disarm(timer); + + soft_timer_cancel(&timer->timer, &timer->expired); } static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu) @@ -448,7 +437,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) * This is to cancel the background timer for the physical timer * emulation if it is set. 
*/ - timer_disarm(timer); + soft_timer_cancel(&timer->timer, &timer->expired); /* * The guest could have modified the timer registers or the timer @@ -615,7 +604,7 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); - timer_disarm(timer); + soft_timer_cancel(&timer->timer, &timer->expired); kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq); } From 14d61fa98f03cb01f3aea7e3069fdf460caf5587 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sat, 17 Jun 2017 07:33:02 -0700 Subject: [PATCH 07/26] KVM: arm/arm64: Rename soft timer to bg_timer As we are about to introduce a separate hrtimer for the physical timer, call this timer bg_timer, because we refer to this timer as the background timer in the code and comments elsewhere. Signed-off-by: Christoffer Dall Acked-by: Marc Zyngier --- include/kvm/arm_arch_timer.h | 2 +- virt/kvm/arm/arch_timer.c | 18 +++++++++--------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index d0beae98f755e..0eed5bc9fd5ec 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -43,7 +43,7 @@ struct arch_timer_cpu { struct arch_timer_context ptimer; /* Background timer used when the guest is not running */ - struct hrtimer timer; + struct hrtimer bg_timer; /* Work queued with the above timer expires */ struct work_struct expired; diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 2232301911953..8ec392850e699 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -148,13 +148,13 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu) return min(min_virt, min_phys); } -static enum hrtimer_restart kvm_timer_expire(struct hrtimer *hrt) +static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt) { struct arch_timer_cpu *timer; struct kvm_vcpu *vcpu; u64 ns; - timer = container_of(hrt, struct arch_timer_cpu, timer); + timer = container_of(hrt, struct arch_timer_cpu, bg_timer); vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu); /* @@ -261,7 +261,7 @@ static void kvm_timer_emulate(struct kvm_vcpu *vcpu, return; /* The timer has not yet expired, schedule a background timer */ - soft_timer_start(&timer->timer, kvm_timer_compute_delta(timer_ctx)); + soft_timer_start(&timer->bg_timer, kvm_timer_compute_delta(timer_ctx)); } /* @@ -294,14 +294,14 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) * The guest timers have not yet expired, schedule a background timer. * Set the earliest expiration time among the guest timers. */ - soft_timer_start(&timer->timer, kvm_timer_earliest_exp(vcpu)); + soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); } void kvm_timer_unschedule(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - soft_timer_cancel(&timer->timer, &timer->expired); + soft_timer_cancel(&timer->bg_timer, &timer->expired); } static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu) @@ -437,7 +437,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) * This is to cancel the background timer for the physical timer * emulation if it is set. 
*/ - soft_timer_cancel(&timer->timer, &timer->expired); + soft_timer_cancel(&timer->bg_timer, &timer->expired); /* * The guest could have modified the timer registers or the timer @@ -494,8 +494,8 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) vcpu_ptimer(vcpu)->cntvoff = 0; INIT_WORK(&timer->expired, kvm_timer_inject_irq_work); - hrtimer_init(&timer->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); - timer->timer.function = kvm_timer_expire; + hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + timer->bg_timer.function = kvm_bg_timer_expire; vtimer->irq.irq = default_vtimer_irq.irq; ptimer->irq.irq = default_ptimer_irq.irq; @@ -604,7 +604,7 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); - soft_timer_cancel(&timer->timer, &timer->expired); + soft_timer_cancel(&timer->bg_timer, &timer->expired); kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq); } From ee9bb9a1e3c6e40874c1611ac24b76c87d2cba7b Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 16 Oct 2016 20:24:30 +0200 Subject: [PATCH 08/26] KVM: arm/arm64: Move timer/vgic flush/sync under disabled irq As we are about to play tricks with the timer to be more lazy in saving and restoring state, we need to move the timer sync and flush functions under a disabled irq section and since we have to flush the vgic state after the timer and PMU state, we do the whole flush/sync sequence with disabled irqs. The only downside is a slightly longer delay before being able to process hardware interrupts and run softirqs. Signed-off-by: Christoffer Dall Reviewed-by: Marc Zyngier --- virt/kvm/arm/arm.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index b9f68e4add710..27db222a0c8d9 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -654,11 +654,11 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_pmu_flush_hwstate(vcpu); + local_irq_disable(); + kvm_timer_flush_hwstate(vcpu); kvm_vgic_flush_hwstate(vcpu); - local_irq_disable(); - /* * If we have a singal pending, or need to notify a userspace * irqchip about timer or PMU level changes, then we exit (and @@ -683,10 +683,10 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) || kvm_request_pending(vcpu)) { vcpu->mode = OUTSIDE_GUEST_MODE; - local_irq_enable(); kvm_pmu_sync_hwstate(vcpu); kvm_timer_sync_hwstate(vcpu); kvm_vgic_sync_hwstate(vcpu); + local_irq_enable(); preempt_enable(); continue; } @@ -709,6 +709,16 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_arm_clear_debug(vcpu); + /* + * We must sync the PMU and timer state before the vgic state so + * that the vgic can properly sample the updated state of the + * interrupt line. + */ + kvm_pmu_sync_hwstate(vcpu); + kvm_timer_sync_hwstate(vcpu); + + kvm_vgic_sync_hwstate(vcpu); + /* * We may have taken a host interrupt in HYP mode (ie * while executing the guest). This interrupt is still @@ -732,16 +742,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) guest_exit(); trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); - /* - * We must sync the PMU and timer state before the vgic state so - * that the vgic can properly sample the updated state of the - * interrupt line. 
- */ - kvm_pmu_sync_hwstate(vcpu); - kvm_timer_sync_hwstate(vcpu); - - kvm_vgic_sync_hwstate(vcpu); - preempt_enable(); ret = handle_exit(vcpu, run, ret); From f2a2129e0ac8d8fa79c3f85425c36f6e3368f022 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 18 Jun 2017 00:32:08 -0700 Subject: [PATCH 09/26] KVM: arm/arm64: Use separate timer for phys timer emulation We were using the same hrtimer for emulating the physical timer and for making sure a blocking VCPU thread would be eventually woken up. That worked fine in the previous arch timer design, but as we are about to actually use the soft timer expire function for the physical timer emulation, change the logic to use a dedicated hrtimer. This has the added benefit of not having to cancel any work in the sync path, which in turn allows us to run the flush and sync with IRQs disabled. Note that the hrtimer used to program the host kernel's timer to generate an exit from the guest when the emulated physical timer fires never has to inject any work, and to share the soft_timer_cancel() function with the bg_timer, we change the function to only cancel any pending work if the pointer to the work struct is not null. Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- include/kvm/arm_arch_timer.h | 3 +++ virt/kvm/arm/arch_timer.c | 21 ++++++++++++++++----- 2 files changed, 19 insertions(+), 5 deletions(-) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 0eed5bc9fd5ec..184c3ef2df93c 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -48,6 +48,9 @@ struct arch_timer_cpu { /* Work queued with the above timer expires */ struct work_struct expired; + /* Physical timer emulation */ + struct hrtimer phys_timer; + /* Is the timer enabled */ bool enabled; }; diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 8ec392850e699..4ad853737c343 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -65,7 +65,8 @@ static void soft_timer_start(struct hrtimer *hrt, u64 ns) static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work) { hrtimer_cancel(hrt); - cancel_work_sync(work); + if (work) + cancel_work_sync(work); } static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) @@ -172,6 +173,12 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt) return HRTIMER_NORESTART; } +static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt) +{ + WARN(1, "Timer only used to ensure guest exit - unexpected event."); + return HRTIMER_NORESTART; +} + bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) { u64 cval, now; @@ -249,7 +256,7 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) } /* Schedule the background timer for the emulated timer. */ -static void kvm_timer_emulate(struct kvm_vcpu *vcpu, +static void phys_timer_emulate(struct kvm_vcpu *vcpu, struct arch_timer_context *timer_ctx) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; @@ -261,7 +268,7 @@ static void kvm_timer_emulate(struct kvm_vcpu *vcpu, return; /* The timer has not yet expired, schedule a background timer */ - soft_timer_start(&timer->bg_timer, kvm_timer_compute_delta(timer_ctx)); + soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(timer_ctx)); } /* @@ -414,7 +421,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) kvm_timer_update_state(vcpu); /* Set the background timer for the physical timer emulation. 
*/ - kvm_timer_emulate(vcpu, vcpu_ptimer(vcpu)); + phys_timer_emulate(vcpu, vcpu_ptimer(vcpu)); if (unlikely(!irqchip_in_kernel(vcpu->kvm))) kvm_timer_flush_hwstate_user(vcpu); else kvm_timer_flush_hwstate_vgic(vcpu); @@ -437,7 +444,7 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) * This is to cancel the background timer for the physical timer * emulation if it is set. */ - soft_timer_cancel(&timer->bg_timer, &timer->expired); + soft_timer_cancel(&timer->phys_timer, NULL); /* * The guest could have modified the timer registers or the timer @@ -497,6 +504,9 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); timer->bg_timer.function = kvm_bg_timer_expire; + hrtimer_init(&timer->phys_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + timer->phys_timer.function = kvm_phys_timer_expire; + vtimer->irq.irq = default_vtimer_irq.irq; ptimer->irq.irq = default_ptimer_irq.irq; } @@ -605,6 +615,7 @@ void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu) struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); soft_timer_cancel(&timer->bg_timer, &timer->expired); + soft_timer_cancel(&timer->phys_timer, NULL); kvm_vgic_unmap_phys_irq(vcpu, vtimer->irq.irq); } From 688c50aa72f64ca21767486e5eef876ec23e418c Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 4 Jan 2017 16:10:28 +0100 Subject: [PATCH 10/26] KVM: arm/arm64: Move timer save/restore out of the hyp code As we are about to be lazy with saving and restoring the timer registers, we prepare by moving all possible timer configuration logic out of the hyp code. All virtual timer registers can be programmed from EL1 and since the arch timer is always a level triggered interrupt we can safely do this with interrupts disabled in the host kernel on the way to the guest without taking vtimer interrupts in the host kernel (yet). The downside is that the cntvoff register can only be programmed from hyp mode, so we jump into hyp mode and back to program it. This is also safe, because the host kernel doesn't use the virtual timer in the KVM code. It may add a little performance penalty, but only until following commits where we move this operation to vcpu load/put.
Signed-off-by: Christoffer Dall Reviewed-by: Marc Zyngier --- arch/arm/include/asm/kvm_asm.h | 2 + arch/arm/include/asm/kvm_hyp.h | 4 +- arch/arm/kvm/hyp/switch.c | 7 +-- arch/arm64/include/asm/kvm_asm.h | 2 + arch/arm64/include/asm/kvm_hyp.h | 4 +- arch/arm64/kvm/hyp/switch.c | 6 +-- virt/kvm/arm/arch_timer.c | 48 +++++++++++++++++++++ virt/kvm/arm/hyp/timer-sr.c | 74 ++++++++++++++------------------ 8 files changed, 95 insertions(+), 52 deletions(-) diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h index 14d68a4d826f6..36dd2962a42db 100644 --- a/arch/arm/include/asm/kvm_asm.h +++ b/arch/arm/include/asm/kvm_asm.h @@ -68,6 +68,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); +extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); + extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); extern void __init_stage2_translation(void); diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h index 14b5903f02246..ab20ffa8b9e76 100644 --- a/arch/arm/include/asm/kvm_hyp.h +++ b/arch/arm/include/asm/kvm_hyp.h @@ -98,8 +98,8 @@ #define cntvoff_el2 CNTVOFF #define cnthctl_el2 CNTHCTL -void __timer_save_state(struct kvm_vcpu *vcpu); -void __timer_restore_state(struct kvm_vcpu *vcpu); +void __timer_enable_traps(struct kvm_vcpu *vcpu); +void __timer_disable_traps(struct kvm_vcpu *vcpu); void __vgic_v2_save_state(struct kvm_vcpu *vcpu); void __vgic_v2_restore_state(struct kvm_vcpu *vcpu); diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c index ebd2dd46adf7f..330c9ce34ba5f 100644 --- a/arch/arm/kvm/hyp/switch.c +++ b/arch/arm/kvm/hyp/switch.c @@ -174,7 +174,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) __activate_vm(vcpu); __vgic_restore_state(vcpu); - __timer_restore_state(vcpu); + __timer_enable_traps(vcpu); __sysreg_restore_state(guest_ctxt); __banked_restore_state(guest_ctxt); @@ -191,7 +191,8 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) __banked_save_state(guest_ctxt); __sysreg_save_state(guest_ctxt); - __timer_save_state(vcpu); + __timer_disable_traps(vcpu); + __vgic_save_state(vcpu); __deactivate_traps(vcpu); @@ -237,7 +238,7 @@ void __hyp_text __noreturn __hyp_panic(int cause) vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR); host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); - __timer_save_state(vcpu); + __timer_disable_traps(vcpu); __deactivate_traps(vcpu); __deactivate_vm(vcpu); __banked_restore_state(host_ctxt); diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h index 26a64d0f9ab91..ab4d0a926043a 100644 --- a/arch/arm64/include/asm/kvm_asm.h +++ b/arch/arm64/include/asm/kvm_asm.h @@ -55,6 +55,8 @@ extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); extern void __kvm_tlb_flush_vmid(struct kvm *kvm); extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu); +extern void __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high); + extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu); extern u64 __vgic_v3_get_ich_vtr_el2(void); diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h index 4572a9b560fa3..08d3bb66c8b75 100644 --- a/arch/arm64/include/asm/kvm_hyp.h +++ b/arch/arm64/include/asm/kvm_hyp.h @@ -129,8 +129,8 @@ void __vgic_v3_save_state(struct kvm_vcpu *vcpu); void __vgic_v3_restore_state(struct kvm_vcpu *vcpu); int __vgic_v3_perform_cpuif_access(struct kvm_vcpu *vcpu); -void 
__timer_save_state(struct kvm_vcpu *vcpu); -void __timer_restore_state(struct kvm_vcpu *vcpu); +void __timer_enable_traps(struct kvm_vcpu *vcpu); +void __timer_disable_traps(struct kvm_vcpu *vcpu); void __sysreg_save_host_state(struct kvm_cpu_context *ctxt); void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt); diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index 945e79c641c4a..4994f4bdaca5d 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -298,7 +298,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) __activate_vm(vcpu); __vgic_restore_state(vcpu); - __timer_restore_state(vcpu); + __timer_enable_traps(vcpu); /* * We must restore the 32-bit state before the sysregs, thanks @@ -368,7 +368,7 @@ int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) __sysreg_save_guest_state(guest_ctxt); __sysreg32_save_state(vcpu); - __timer_save_state(vcpu); + __timer_disable_traps(vcpu); __vgic_save_state(vcpu); __deactivate_traps(vcpu); @@ -436,7 +436,7 @@ void __hyp_text __noreturn __hyp_panic(void) vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2); host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context); - __timer_save_state(vcpu); + __timer_disable_traps(vcpu); __deactivate_traps(vcpu); __deactivate_vm(vcpu); __sysreg_restore_host_state(host_ctxt); diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 4ad853737c343..93c8973a71f47 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -271,6 +271,20 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu, soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(timer_ctx)); } +static void timer_save_state(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + + if (timer->enabled) { + vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); + vtimer->cnt_cval = read_sysreg_el0(cntv_cval); + } + + /* Disable the virtual timer */ + write_sysreg_el0(0, cntv_ctl); +} + /* * Schedule the background timer before calling kvm_vcpu_block, so that this * thread is removed from its waitqueue and made runnable when there's a timer @@ -304,6 +318,18 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); } +static void timer_restore_state(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + + if (timer->enabled) { + write_sysreg_el0(vtimer->cnt_cval, cntv_cval); + isb(); + write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl); + } +} + void kvm_timer_unschedule(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; @@ -311,6 +337,21 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu) soft_timer_cancel(&timer->bg_timer, &timer->expired); } +static void set_cntvoff(u64 cntvoff) +{ + u32 low = lower_32_bits(cntvoff); + u32 high = upper_32_bits(cntvoff); + + /* + * Since kvm_call_hyp doesn't fully support the ARM PCS especially on + * 32-bit systems, but rather passes register by register shifted one + * place (we put the function address in r0/x0), we cannot simply pass + * a 64-bit value as an argument, but have to split the value in two + * 32-bit halves. 
+ */ + kvm_call_hyp(__kvm_timer_set_cntvoff, low, high); +} + static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu) { struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); @@ -414,6 +455,7 @@ static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu) void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); if (unlikely(!timer->enabled)) return; @@ -427,6 +469,9 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) kvm_timer_flush_hwstate_user(vcpu); else kvm_timer_flush_hwstate_vgic(vcpu); + + set_cntvoff(vtimer->cntvoff); + timer_restore_state(vcpu); } /** @@ -446,6 +491,9 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) */ soft_timer_cancel(&timer->phys_timer, NULL); + timer_save_state(vcpu); + set_cntvoff(0); + /* * The guest could have modified the timer registers or the timer * could have expired, update the timer state. diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c index 4734915ab71f7..f39861639f08f 100644 --- a/virt/kvm/arm/hyp/timer-sr.c +++ b/virt/kvm/arm/hyp/timer-sr.c @@ -21,58 +21,48 @@ #include -/* vcpu is already in the HYP VA space */ -void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu) +void __hyp_text __kvm_timer_set_cntvoff(u32 cntvoff_low, u32 cntvoff_high) +{ + u64 cntvoff = (u64)cntvoff_high << 32 | cntvoff_low; + write_sysreg(cntvoff, cntvoff_el2); +} + +void __hyp_text enable_el1_phys_timer_access(void) { - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); u64 val; - if (timer->enabled) { - vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); - vtimer->cnt_cval = read_sysreg_el0(cntv_cval); - } + /* Allow physical timer/counter access for the host */ + val = read_sysreg(cnthctl_el2); + val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; + write_sysreg(val, cnthctl_el2); +} - /* Disable the virtual timer */ - write_sysreg_el0(0, cntv_ctl); +void __hyp_text disable_el1_phys_timer_access(void) +{ + u64 val; + /* + * Disallow physical timer access for the guest + * Physical counter access is allowed + */ + val = read_sysreg(cnthctl_el2); + val &= ~CNTHCTL_EL1PCEN; + val |= CNTHCTL_EL1PCTEN; + write_sysreg(val, cnthctl_el2); +} + +void __hyp_text __timer_disable_traps(struct kvm_vcpu *vcpu) +{ /* * We don't need to do this for VHE since the host kernel runs in EL2 * with HCR_EL2.TGE ==1, which makes those bits have no impact. 
*/ - if (!has_vhe()) { - /* Allow physical timer/counter access for the host */ - val = read_sysreg(cnthctl_el2); - val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN; - write_sysreg(val, cnthctl_el2); - } - - /* Clear cntvoff for the host */ - write_sysreg(0, cntvoff_el2); + if (!has_vhe()) + enable_el1_phys_timer_access(); } -void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu) +void __hyp_text __timer_enable_traps(struct kvm_vcpu *vcpu) { - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); - u64 val; - - /* Those bits are already configured at boot on VHE-system */ - if (!has_vhe()) { - /* - * Disallow physical timer access for the guest - * Physical counter access is allowed - */ - val = read_sysreg(cnthctl_el2); - val &= ~CNTHCTL_EL1PCEN; - val |= CNTHCTL_EL1PCTEN; - write_sysreg(val, cnthctl_el2); - } - - if (timer->enabled) { - write_sysreg(vtimer->cntvoff, cntvoff_el2); - write_sysreg_el0(vtimer->cnt_cval, cntv_cval); - isb(); - write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl); - } + if (!has_vhe()) + disable_el1_phys_timer_access(); } From 40f4cba9a579fe7ad1431269db8aec745c290ba0 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Wed, 5 Jul 2017 12:50:27 +0200 Subject: [PATCH 11/26] KVM: arm/arm64: Set VCPU affinity for virt timer irq As we are about to take physical interrupts for the virtual timer on the host but want to leave those active while running the VM (and let the VM deactivate them), we need to set the vtimer PPI affinity accordingly. Signed-off-by: Christoffer Dall Reviewed-by: Marc Zyngier --- virt/kvm/arm/arch_timer.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 93c8973a71f47..eac1b3d83a86f 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -649,11 +649,20 @@ int kvm_timer_hyp_init(void) return err; } + err = irq_set_vcpu_affinity(host_vtimer_irq, kvm_get_running_vcpus()); + if (err) { + kvm_err("kvm_arch_timer: error setting vcpu affinity\n"); + goto out_free_irq; + } + kvm_info("virtual timer IRQ%d\n", host_vtimer_irq); cpuhp_setup_state(CPUHP_AP_KVM_ARM_TIMER_STARTING, "kvm/arm/timer:starting", kvm_timer_starting_cpu, kvm_timer_dying_cpu); + return 0; +out_free_irq: + free_percpu_irq(host_vtimer_irq, kvm_get_running_vcpus()); return err; } From b103cc3f10c06fb81faacd4ee6f88bbd21246073 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 16 Oct 2016 20:30:38 +0200 Subject: [PATCH 12/26] KVM: arm/arm64: Avoid timer save/restore in vcpu entry/exit We don't need to save and restore the hardware timer state and examine if it generates interrupts on on every entry/exit to the guest. The timer hardware is perfectly capable of telling us when it has expired by signaling interrupts. When taking a vtimer interrupt in the host, we don't want to mess with the timer configuration, we just want to forward the physical interrupt to the guest as a virtual interrupt. We can use the split priority drop and deactivate feature of the GIC to do this, which leaves an EOI'ed interrupt active on the physical distributor, making sure we don't keep taking timer interrupts which would prevent the guest from running. 
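As a rough sketch of that flow, the host-side vtimer handler introduced
further down in this patch boils down to the following (simplified here;
the NULL-vcpu check and the userspace-irqchip masking path are omitted):

    static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id)
    {
            struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id;
            struct arch_timer_context *vtimer = vcpu_vtimer(vcpu);

            /*
             * The GIC has only performed a priority drop, so the
             * interrupt stays active on the distributor and cannot
             * fire again until the guest deactivates it through the
             * HW bit in the LR.
             */
            if (!vtimer->irq.level) {
                    vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl);
                    if (kvm_timer_irq_can_fire(vtimer))
                            kvm_timer_update_irq(vcpu, true, vtimer);
            }

            return IRQ_HANDLED;
    }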
We can then forward the physical interrupt to the VM using the HW bit in the LR of the GIC, like we do already, which lets the guest directly deactivate both the physical and virtual timer simultaneously, allowing the timer hardware to exit the VM and generate a new physical interrupt when the timer output is again asserted later on. We do need to capture this state when migrating VCPUs between physical CPUs, however, which we use the vcpu put/load functions for, which are called through preempt notifiers whenever the thread is scheduled away from the CPU or called directly if we return from the ioctl to userspace. One caveat is that we have to save and restore the timer state in both kvm_timer_vcpu_[put/load] and kvm_timer_[schedule/unschedule], because we can have the following flows: 1. kvm_vcpu_block 2. kvm_timer_schedule 3. schedule 4. kvm_timer_vcpu_put (preempt notifier) 5. schedule (vcpu thread gets scheduled back) 6. kvm_timer_vcpu_load (preempt notifier) 7. kvm_timer_unschedule And a version where we don't actually call schedule: 1. kvm_vcpu_block 2. kvm_timer_schedule 7. kvm_timer_unschedule Since kvm_timer_[schedule/unschedule] may not be followed by put/load, but put/load also may be called independently, we call the timer save/restore functions from both paths. Since they rely on the loaded flag to never save/restore when unnecessary, this doesn't cause any harm, and we ensure that all invokations of either set of functions work as intended. An added benefit beyond not having to read and write the timer sysregs on every entry and exit is that we no longer have to actively write the active state to the physical distributor, because we configured the irq for the vtimer to only get a priority drop when handling the interrupt in the GIC driver (we called irq_set_vcpu_affinity()), and the interrupt stays active after firing on the host. Reviewed-by: Marc Zyngier Signed-off-by: Christoffer Dall --- include/kvm/arm_arch_timer.h | 16 ++- virt/kvm/arm/arch_timer.c | 237 ++++++++++++++++++++++------------- virt/kvm/arm/arm.c | 19 ++- 3 files changed, 178 insertions(+), 94 deletions(-) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 184c3ef2df93c..c538f707e1c1e 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -31,8 +31,15 @@ struct arch_timer_context { /* Timer IRQ */ struct kvm_irq_level irq; - /* Active IRQ state caching */ - bool active_cleared_last; + /* + * We have multiple paths which can save/restore the timer state + * onto the hardware, so we need some way of keeping track of + * where the latest state is. + * + * loaded == true: State is loaded on the hardware registers. + * loaded == false: State is stored in memory. 
+ */ + bool loaded; /* Virtual offset */ u64 cntvoff; @@ -78,10 +85,15 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu); u64 kvm_phys_timer_read(void); +void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu); void kvm_timer_init_vhe(void); #define vcpu_vtimer(v) (&(v)->arch.timer_cpu.vtimer) #define vcpu_ptimer(v) (&(v)->arch.timer_cpu.ptimer) + +void enable_el1_phys_timer_access(void); +void disable_el1_phys_timer_access(void); + #endif diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index eac1b3d83a86f..ec685c1f3b782 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -46,10 +46,9 @@ static const struct kvm_irq_level default_vtimer_irq = { .level = 1, }; -void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) -{ - vcpu_vtimer(vcpu)->active_cleared_last = false; -} +static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx); +static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, + struct arch_timer_context *timer_ctx); u64 kvm_phys_timer_read(void) { @@ -69,17 +68,45 @@ static void soft_timer_cancel(struct hrtimer *hrt, struct work_struct *work) cancel_work_sync(work); } -static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) +static void kvm_vtimer_update_mask_user(struct kvm_vcpu *vcpu) { - struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); /* - * We disable the timer in the world switch and let it be - * handled by kvm_timer_sync_hwstate(). Getting a timer - * interrupt at this point is a sure sign of some major - * breakage. + * When using a userspace irqchip with the architected timers, we must + * prevent continuously exiting from the guest, and therefore mask the + * physical interrupt by disabling it on the host interrupt controller + * when the virtual level is high, such that the guest can make + * forward progress. Once we detect the output level being + * de-asserted, we unmask the interrupt again so that we exit from the + * guest when the timer fires. 
*/ - pr_warn("Unexpected interrupt %d on vcpu %p\n", irq, vcpu); + if (vtimer->irq.level) + disable_percpu_irq(host_vtimer_irq); + else + enable_percpu_irq(host_vtimer_irq, 0); +} + +static irqreturn_t kvm_arch_timer_handler(int irq, void *dev_id) +{ + struct kvm_vcpu *vcpu = *(struct kvm_vcpu **)dev_id; + struct arch_timer_context *vtimer; + + if (!vcpu) { + pr_warn_once("Spurious arch timer IRQ on non-VCPU thread\n"); + return IRQ_NONE; + } + vtimer = vcpu_vtimer(vcpu); + + if (!vtimer->irq.level) { + vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); + if (kvm_timer_irq_can_fire(vtimer)) + kvm_timer_update_irq(vcpu, true, vtimer); + } + + if (unlikely(!irqchip_in_kernel(vcpu->kvm))) + kvm_vtimer_update_mask_user(vcpu); + return IRQ_HANDLED; } @@ -215,7 +242,6 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, { int ret; - timer_ctx->active_cleared_last = false; timer_ctx->irq.level = new_level; trace_kvm_timer_update_irq(vcpu->vcpu_id, timer_ctx->irq.irq, timer_ctx->irq.level); @@ -271,10 +297,16 @@ static void phys_timer_emulate(struct kvm_vcpu *vcpu, soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(timer_ctx)); } -static void timer_save_state(struct kvm_vcpu *vcpu) +static void vtimer_save_state(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + unsigned long flags; + + local_irq_save(flags); + + if (!vtimer->loaded) + goto out; if (timer->enabled) { vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); @@ -283,6 +315,10 @@ static void timer_save_state(struct kvm_vcpu *vcpu) /* Disable the virtual timer */ write_sysreg_el0(0, cntv_ctl); + + vtimer->loaded = false; +out: + local_irq_restore(flags); } /* @@ -296,6 +332,8 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); + vtimer_save_state(vcpu); + /* * No need to schedule a background timer if any guest timer has * already expired, because kvm_vcpu_block will return before putting @@ -318,22 +356,34 @@ void kvm_timer_schedule(struct kvm_vcpu *vcpu) soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); } -static void timer_restore_state(struct kvm_vcpu *vcpu) +static void vtimer_restore_state(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + unsigned long flags; + + local_irq_save(flags); + + if (vtimer->loaded) + goto out; if (timer->enabled) { write_sysreg_el0(vtimer->cnt_cval, cntv_cval); isb(); write_sysreg_el0(vtimer->cnt_ctl, cntv_ctl); } + + vtimer->loaded = true; +out: + local_irq_restore(flags); } void kvm_timer_unschedule(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + vtimer_restore_state(vcpu); + soft_timer_cancel(&timer->bg_timer, &timer->expired); } @@ -352,61 +402,45 @@ static void set_cntvoff(u64 cntvoff) kvm_call_hyp(__kvm_timer_set_cntvoff, low, high); } -static void kvm_timer_flush_hwstate_vgic(struct kvm_vcpu *vcpu) +static void kvm_timer_vcpu_load_vgic(struct kvm_vcpu *vcpu) { struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); bool phys_active; int ret; - /* - * If we enter the guest with the virtual input level to the VGIC - * asserted, then we have already told the VGIC what we need to, and - * we don't need to exit from the guest until the guest deactivates - * the already injected interrupt, so therefore we should set the - * hardware active state to prevent 
unnecessary exits from the guest. - * - * Also, if we enter the guest with the virtual timer interrupt active, - * then it must be active on the physical distributor, because we set - * the HW bit and the guest must be able to deactivate the virtual and - * physical interrupt at the same time. - * - * Conversely, if the virtual input level is deasserted and the virtual - * interrupt is not active, then always clear the hardware active state - * to ensure that hardware interrupts from the timer triggers a guest - * exit. - */ phys_active = vtimer->irq.level || - kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); - - /* - * We want to avoid hitting the (re)distributor as much as - * possible, as this is a potentially expensive MMIO access - * (not to mention locks in the irq layer), and a solution for - * this is to cache the "active" state in memory. - * - * Things to consider: we cannot cache an "active set" state, - * because the HW can change this behind our back (it becomes - * "clear" in the HW). We must then restrict the caching to - * the "clear" state. - * - * The cache is invalidated on: - * - vcpu put, indicating that the HW cannot be trusted to be - * in a sane state on the next vcpu load, - * - any change in the interrupt state - * - * Usage conditions: - * - cached value is "active clear" - * - value to be programmed is "active clear" - */ - if (vtimer->active_cleared_last && !phys_active) - return; + kvm_vgic_map_is_active(vcpu, vtimer->irq.irq); ret = irq_set_irqchip_state(host_vtimer_irq, IRQCHIP_STATE_ACTIVE, phys_active); WARN_ON(ret); +} - vtimer->active_cleared_last = !phys_active; +static void kvm_timer_vcpu_load_user(struct kvm_vcpu *vcpu) +{ + kvm_vtimer_update_mask_user(vcpu); +} + +void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + + if (unlikely(!timer->enabled)) + return; + + if (unlikely(!irqchip_in_kernel(vcpu->kvm))) + kvm_timer_vcpu_load_user(vcpu); + else + kvm_timer_vcpu_load_vgic(vcpu); + + set_cntvoff(vtimer->cntvoff); + + vtimer_restore_state(vcpu); + + if (has_vhe()) + disable_el1_phys_timer_access(); } bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) @@ -426,23 +460,6 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) ptimer->irq.level != plevel; } -static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu) -{ - struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); - - /* - * To prevent continuously exiting from the guest, we mask the - * physical interrupt such that the guest can make forward progress. - * Once we detect the output level being deasserted, we unmask the - * interrupt again so that we exit from the guest when the timer - * fires. - */ - if (vtimer->irq.level) - disable_percpu_irq(host_vtimer_irq); - else - enable_percpu_irq(host_vtimer_irq, 0); -} - /** * kvm_timer_flush_hwstate - prepare timers before running the vcpu * @vcpu: The vcpu pointer @@ -455,23 +472,61 @@ static void kvm_timer_flush_hwstate_user(struct kvm_vcpu *vcpu) void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); if (unlikely(!timer->enabled)) return; - kvm_timer_update_state(vcpu); + if (kvm_timer_should_fire(ptimer) != ptimer->irq.level) + kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); /* Set the background timer for the physical timer emulation. 
*/ phys_timer_emulate(vcpu, vcpu_ptimer(vcpu)); +} - if (unlikely(!irqchip_in_kernel(vcpu->kvm))) - kvm_timer_flush_hwstate_user(vcpu); - else - kvm_timer_flush_hwstate_vgic(vcpu); +void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - set_cntvoff(vtimer->cntvoff); - timer_restore_state(vcpu); + if (unlikely(!timer->enabled)) + return; + + if (has_vhe()) + enable_el1_phys_timer_access(); + + vtimer_save_state(vcpu); + + /* + * The kernel may decide to run userspace after calling vcpu_put, so + * we reset cntvoff to 0 to ensure a consistent read between user + * accesses to the virtual counter and kernel access to the physical + * counter. + */ + set_cntvoff(0); +} + +static void unmask_vtimer_irq(struct kvm_vcpu *vcpu) +{ + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + + if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { + kvm_vtimer_update_mask_user(vcpu); + return; + } + + /* + * If the guest disabled the timer without acking the interrupt, then + * we must make sure the physical and virtual active states are in + * sync by deactivating the physical interrupt, because otherwise we + * wouldn't see the next timer interrupt in the host. + */ + if (!kvm_vgic_map_is_active(vcpu, vtimer->irq.irq)) { + int ret; + ret = irq_set_irqchip_state(host_vtimer_irq, + IRQCHIP_STATE_ACTIVE, + false); + WARN_ON(ret); + } } /** @@ -484,6 +539,7 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); /* * This is to cancel the background timer for the physical timer @@ -491,14 +547,19 @@ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) */ soft_timer_cancel(&timer->phys_timer, NULL); - timer_save_state(vcpu); - set_cntvoff(0); - /* - * The guest could have modified the timer registers or the timer - * could have expired, update the timer state. + * If we entered the guest with the vtimer output asserted we have to + * check if the guest has modified the timer so that we should lower + * the line at this point. */ - kvm_timer_update_state(vcpu); + if (vtimer->irq.level) { + vtimer->cnt_ctl = read_sysreg_el0(cntv_ctl); + vtimer->cnt_cval = read_sysreg_el0(cntv_cval); + if (!kvm_timer_should_fire(vtimer)) { + kvm_timer_update_irq(vcpu, false, vtimer); + unmask_vtimer_irq(vcpu); + } + } } int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 27db222a0c8d9..132d39ae13d24 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -354,18 +354,18 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state); kvm_arm_set_running_vcpu(vcpu); - kvm_vgic_load(vcpu); + kvm_timer_vcpu_load(vcpu); } void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) { + kvm_timer_vcpu_put(vcpu); kvm_vgic_put(vcpu); vcpu->cpu = -1; kvm_arm_set_running_vcpu(NULL); - kvm_timer_vcpu_put(vcpu); } static void vcpu_power_off(struct kvm_vcpu *vcpu) @@ -710,15 +710,26 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) kvm_arm_clear_debug(vcpu); /* - * We must sync the PMU and timer state before the vgic state so + * We must sync the PMU state before the vgic state so * that the vgic can properly sample the updated state of the * interrupt line. 
*/ kvm_pmu_sync_hwstate(vcpu); - kvm_timer_sync_hwstate(vcpu); + /* + * Sync the vgic state before syncing the timer state because + * the timer code needs to know if the virtual timer + * interrupts are active. + */ kvm_vgic_sync_hwstate(vcpu); + /* + * Sync the timer hardware state before enabling interrupts as + * we don't want vtimer interrupts to race with syncing the + * timer virtual interrupt state. + */ + kvm_timer_sync_hwstate(vcpu); + /* * We may have taken a host interrupt in HYP mode (ie * while executing the guest). This interrupt is still From 5c5196da4e966cc23af6a1576ad2a9e45bed3f99 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 16 Jun 2017 23:08:57 -0700 Subject: [PATCH 13/26] KVM: arm/arm64: Support EL1 phys timer register access in set/get reg Add suport for the physical timer registers in kvm_arm_timer_set_reg and kvm_arm_timer_get_reg so that these functions can be reused to interact with the rest of the system. Note that this paves part of the way for the physical timer state save/restore, but we still need to add those registers to KVM_GET_REG_LIST before we support migrating the physical timer state. Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- arch/arm/include/uapi/asm/kvm.h | 6 ++++++ arch/arm64/include/uapi/asm/kvm.h | 6 ++++++ virt/kvm/arm/arch_timer.c | 33 +++++++++++++++++++++++++++++-- 3 files changed, 43 insertions(+), 2 deletions(-) diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 5db2d4c6a55fa..665c454e50d0a 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -151,6 +151,12 @@ struct kvm_arch_memory_slot { (__ARM_CP15_REG(op1, 0, crm, 0) | KVM_REG_SIZE_U64) #define ARM_CP15_REG64(...) __ARM_CP15_REG64(__VA_ARGS__) +/* PL1 Physical Timer Registers */ +#define KVM_REG_ARM_PTIMER_CTL ARM_CP15_REG32(0, 14, 2, 1) +#define KVM_REG_ARM_PTIMER_CNT ARM_CP15_REG64(0, 14) +#define KVM_REG_ARM_PTIMER_CVAL ARM_CP15_REG64(2, 14) + +/* Virtual Timer Registers */ #define KVM_REG_ARM_TIMER_CTL ARM_CP15_REG32(0, 14, 3, 1) #define KVM_REG_ARM_TIMER_CNT ARM_CP15_REG64(1, 14) #define KVM_REG_ARM_TIMER_CVAL ARM_CP15_REG64(3, 14) diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 9f3ca24bbcc6c..0004feef7cc29 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -195,6 +195,12 @@ struct kvm_arch_memory_slot { #define ARM64_SYS_REG(...) 
(__ARM64_SYS_REG(__VA_ARGS__) | KVM_REG_SIZE_U64) +/* Physical Timer EL0 Registers */ +#define KVM_REG_ARM_PTIMER_CTL ARM64_SYS_REG(3, 3, 14, 2, 1) +#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2) +#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1) + +/* EL0 Virtual Timer Registers */ #define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1) #define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2) #define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2) diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index ec685c1f3b782..d1a6fb12121fb 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -628,10 +628,11 @@ static void kvm_timer_init_interrupt(void *info) int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) { struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); switch (regid) { case KVM_REG_ARM_TIMER_CTL: - vtimer->cnt_ctl = value; + vtimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT; break; case KVM_REG_ARM_TIMER_CNT: update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value); @@ -639,6 +640,13 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) case KVM_REG_ARM_TIMER_CVAL: vtimer->cnt_cval = value; break; + case KVM_REG_ARM_PTIMER_CTL: + ptimer->cnt_ctl = value & ~ARCH_TIMER_CTRL_IT_STAT; + break; + case KVM_REG_ARM_PTIMER_CVAL: + ptimer->cnt_cval = value; + break; + default: return -1; } @@ -647,17 +655,38 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) return 0; } +static u64 read_timer_ctl(struct arch_timer_context *timer) +{ + /* + * Set ISTATUS bit if it's expired. + * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is + * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit + * regardless of ENABLE bit for our implementation convenience. + */ + if (!kvm_timer_compute_delta(timer)) + return timer->cnt_ctl | ARCH_TIMER_CTRL_IT_STAT; + else + return timer->cnt_ctl; +} + u64 kvm_arm_timer_get_reg(struct kvm_vcpu *vcpu, u64 regid) { + struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); switch (regid) { case KVM_REG_ARM_TIMER_CTL: - return vtimer->cnt_ctl; + return read_timer_ctl(vtimer); case KVM_REG_ARM_TIMER_CNT: return kvm_phys_timer_read() - vtimer->cntvoff; case KVM_REG_ARM_TIMER_CVAL: return vtimer->cnt_cval; + case KVM_REG_ARM_PTIMER_CTL: + return read_timer_ctl(ptimer); + case KVM_REG_ARM_PTIMER_CVAL: + return ptimer->cnt_cval; + case KVM_REG_ARM_PTIMER_CNT: + return kvm_phys_timer_read(); } return (u64)-1; } From c1b135af8387bc6c6d43ed61a65647c68f231ea1 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 16 Jun 2017 23:12:06 -0700 Subject: [PATCH 14/26] KVM: arm/arm64: Use kvm_arm_timer_set/get_reg for guest register traps When trapping on a guest access to one of the timer registers, we were messing with the internals of the timer state from the sysregs handling code, and that logic was about to receive more added complexity when optimizing the timer handling code. Therefore, since we already have timer register access functions (to access registers from userspace), reuse those for the timer register traps from a VM and let the timer code maintain its own consistency. 
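In practice this makes the trap handlers thin wrappers around the existing
register accessors. The CNTP_TVAL_EL0 handler, for example, reduces to
roughly the following (condensed from the sys_regs.c hunk below; only the
TVAL to CVAL conversion remains local to the trap code):

    static bool access_cntp_tval(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *p,
                                 const struct sys_reg_desc *r)
    {
            u64 now = kvm_phys_timer_read();

            if (p->is_write)
                    kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
                                          p->regval + now);
            else
                    p->regval = kvm_arm_timer_get_reg(vcpu,
                                          KVM_REG_ARM_PTIMER_CVAL) - now;

            return true;
    }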
Signed-off-by: Christoffer Dall Acked-by: Marc Zyngier --- arch/arm64/kvm/sys_regs.c | 41 +++++++++++++-------------------------- 1 file changed, 14 insertions(+), 27 deletions(-) diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c index 2e070d3baf9f1..bb0e41b3154e6 100644 --- a/arch/arm64/kvm/sys_regs.c +++ b/arch/arm64/kvm/sys_regs.c @@ -841,13 +841,16 @@ static bool access_cntp_tval(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); u64 now = kvm_phys_timer_read(); + u64 cval; - if (p->is_write) - ptimer->cnt_cval = p->regval + now; - else - p->regval = ptimer->cnt_cval - now; + if (p->is_write) { + kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, + p->regval + now); + } else { + cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); + p->regval = cval - now; + } return true; } @@ -856,24 +859,10 @@ static bool access_cntp_ctl(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); - - if (p->is_write) { - /* ISTATUS bit is read-only */ - ptimer->cnt_ctl = p->regval & ~ARCH_TIMER_CTRL_IT_STAT; - } else { - u64 now = kvm_phys_timer_read(); - - p->regval = ptimer->cnt_ctl; - /* - * Set ISTATUS bit if it's expired. - * Note that according to ARMv8 ARM Issue A.k, ISTATUS bit is - * UNKNOWN when ENABLE bit is 0, so we chose to set ISTATUS bit - * regardless of ENABLE bit for our implementation convenience. - */ - if (ptimer->cnt_cval <= now) - p->regval |= ARCH_TIMER_CTRL_IT_STAT; - } + if (p->is_write) + kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval); + else + p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL); return true; } @@ -882,12 +871,10 @@ static bool access_cntp_cval(struct kvm_vcpu *vcpu, struct sys_reg_params *p, const struct sys_reg_desc *r) { - struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); - if (p->is_write) - ptimer->cnt_cval = p->regval; + kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval); else - p->regval = ptimer->cnt_cval; + p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL); return true; } From cda93b7aa4657f2a9df28d56df8259226be8b0e9 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 18 Jun 2017 01:41:06 -0700 Subject: [PATCH 15/26] KVM: arm/arm64: Move phys_timer_emulate function We are about to call phys_timer_emulate() from kvm_timer_update_state() and modify phys_timer_emulate() at the same time. Moving the function and modifying it in a single patch makes the diff hard to read, so do this separately first. No functional change. Signed-off-by: Christoffer Dall Acked-by: Marc Zyngier --- virt/kvm/arm/arch_timer.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index d1a6fb12121fb..7f4f12d48a1a6 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -255,6 +255,22 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, } } +/* Schedule the background timer for the emulated timer. 
*/ +static void phys_timer_emulate(struct kvm_vcpu *vcpu, + struct arch_timer_context *timer_ctx) +{ + struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + + if (kvm_timer_should_fire(timer_ctx)) + return; + + if (!kvm_timer_irq_can_fire(timer_ctx)) + return; + + /* The timer has not yet expired, schedule a background timer */ + soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(timer_ctx)); +} + /* * Check if there was a change in the timer state (should we raise or lower * the line level to the GIC). @@ -281,22 +297,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); } -/* Schedule the background timer for the emulated timer. */ -static void phys_timer_emulate(struct kvm_vcpu *vcpu, - struct arch_timer_context *timer_ctx) -{ - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - - if (kvm_timer_should_fire(timer_ctx)) - return; - - if (!kvm_timer_irq_can_fire(timer_ctx)) - return; - - /* The timer has not yet expired, schedule a background timer */ - soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(timer_ctx)); -} - static void vtimer_save_state(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; From bbdd52cfcba290560909498c7d5681f19894587b Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 18 Jun 2017 01:42:55 -0700 Subject: [PATCH 16/26] KVM: arm/arm64: Avoid phys timer emulation in vcpu entry/exit There is no need to schedule and cancel a hrtimer when entering and exiting the guest, because we know when the physical timer is going to fire when the guest programs it, and we can simply program the hrtimer at that point. Now when the register modifications from the guest go through the kvm_arm_timer_set/get_reg functions, which always call kvm_timer_update_state(), we can simply consider the timer state in this function and schedule and cancel the timers as needed. This avoids looking at the physical timer emulation state when entering and exiting the VCPU, allowing for faster servicing of the VM when needed. Signed-off-by: Christoffer Dall Reviewed-by: Marc Zyngier --- virt/kvm/arm/arch_timer.c | 75 ++++++++++++++++++++++++++------------- 1 file changed, 51 insertions(+), 24 deletions(-) diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 7f4f12d48a1a6..20e56c4206326 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -202,7 +202,27 @@ static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt) static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt) { - WARN(1, "Timer only used to ensure guest exit - unexpected event."); + struct arch_timer_context *ptimer; + struct arch_timer_cpu *timer; + struct kvm_vcpu *vcpu; + u64 ns; + + timer = container_of(hrt, struct arch_timer_cpu, phys_timer); + vcpu = container_of(timer, struct kvm_vcpu, arch.timer_cpu); + ptimer = vcpu_ptimer(vcpu); + + /* + * Check that the timer has really expired from the guest's + * PoV (NTP on the host may have forced it to expire + * early). If not ready, schedule for a later time. + */ + ns = kvm_timer_compute_delta(ptimer); + if (unlikely(ns)) { + hrtimer_forward_now(hrt, ns_to_ktime(ns)); + return HRTIMER_RESTART; + } + + kvm_timer_update_irq(vcpu, true, ptimer); return HRTIMER_NORESTART; } @@ -256,24 +276,28 @@ static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, } /* Schedule the background timer for the emulated timer. 
*/ -static void phys_timer_emulate(struct kvm_vcpu *vcpu, - struct arch_timer_context *timer_ctx) +static void phys_timer_emulate(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; + struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); - if (kvm_timer_should_fire(timer_ctx)) - return; - - if (!kvm_timer_irq_can_fire(timer_ctx)) + /* + * If the timer can fire now we have just raised the IRQ line and we + * don't need to have a soft timer scheduled for the future. If the + * timer cannot fire at all, then we also don't need a soft timer. + */ + if (kvm_timer_should_fire(ptimer) || !kvm_timer_irq_can_fire(ptimer)) { + soft_timer_cancel(&timer->phys_timer, NULL); return; + } - /* The timer has not yet expired, schedule a background timer */ - soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(timer_ctx)); + soft_timer_start(&timer->phys_timer, kvm_timer_compute_delta(ptimer)); } /* - * Check if there was a change in the timer state (should we raise or lower - * the line level to the GIC). + * Check if there was a change in the timer state, so that we should either + * raise or lower the line level to the GIC or schedule a background timer to + * emulate the physical timer. */ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) { @@ -295,6 +319,8 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) if (kvm_timer_should_fire(ptimer) != ptimer->irq.level) kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); + + phys_timer_emulate(vcpu); } static void vtimer_save_state(struct kvm_vcpu *vcpu) @@ -441,6 +467,9 @@ void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu) if (has_vhe()) disable_el1_phys_timer_access(); + + /* Set the background timer for the physical timer emulation. */ + phys_timer_emulate(vcpu); } bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) @@ -476,12 +505,6 @@ void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) if (unlikely(!timer->enabled)) return; - - if (kvm_timer_should_fire(ptimer) != ptimer->irq.level) - kvm_timer_update_irq(vcpu, !ptimer->irq.level, ptimer); - - /* Set the background timer for the physical timer emulation. */ - phys_timer_emulate(vcpu, vcpu_ptimer(vcpu)); } void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) @@ -496,6 +519,17 @@ void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) vtimer_save_state(vcpu); + /* + * Cancel the physical timer emulation, because the only case where we + * need it after a vcpu_put is in the context of a sleeping VCPU, and + * in that case we already factor in the deadline for the physical + * timer when scheduling the bg_timer. + * + * In any case, we re-schedule the hrtimer for the physical timer when + * coming back to the VCPU thread in kvm_timer_vcpu_load(). + */ + soft_timer_cancel(&timer->phys_timer, NULL); + /* * The kernel may decide to run userspace after calling vcpu_put, so * we reset cntvoff to 0 to ensure a consistent read between user @@ -538,15 +572,8 @@ static void unmask_vtimer_irq(struct kvm_vcpu *vcpu) */ void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu) { - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); - /* - * This is to cancel the background timer for the physical timer - * emulation if it is set. 
- */ - soft_timer_cancel(&timer->phys_timer, NULL); - /* * If we entered the guest with the vtimer output asserted we have to * check if the guest has modified the timer so that we should lower From 7e90c8e5704cbb299d48e7debb1e61614cb12f41 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Tue, 20 Jun 2017 07:56:20 -0700 Subject: [PATCH 17/26] KVM: arm/arm64: Get rid of kvm_timer_flush_hwstate Now when both the vtimer and the ptimer when using both the in-kernel vgic emulation and a userspace IRQ chip are driven by the timer signals and at the vcpu load/put boundaries, instead of recomputing the timer state at every entry/exit to/from the guest, we can get entirely rid of the flush hwstate function. Signed-off-by: Christoffer Dall Acked-by: Marc Zyngier --- include/kvm/arm_arch_timer.h | 1 - virt/kvm/arm/arch_timer.c | 24 ------------------------ virt/kvm/arm/arm.c | 1 - 3 files changed, 26 deletions(-) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index c538f707e1c1e..2352f3a4e88b0 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -66,7 +66,6 @@ int kvm_timer_hyp_init(void); int kvm_timer_enable(struct kvm_vcpu *vcpu); int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu); void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu); -void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu); bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu); void kvm_timer_update_run(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 20e56c4206326..53d9bd4a734f7 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -305,12 +305,6 @@ static void kvm_timer_update_state(struct kvm_vcpu *vcpu) struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); - /* - * If userspace modified the timer registers via SET_ONE_REG before - * the vgic was initialized, we mustn't set the vtimer->irq.level value - * because the guest would never see the interrupt. Instead wait - * until we call this function from kvm_timer_flush_hwstate. - */ if (unlikely(!timer->enabled)) return; @@ -489,24 +483,6 @@ bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu) ptimer->irq.level != plevel; } -/** - * kvm_timer_flush_hwstate - prepare timers before running the vcpu - * @vcpu: The vcpu pointer - * - * Check if the virtual timer has expired while we were running in the host, - * and inject an interrupt if that was the case, making sure the timer is - * masked or disabled on the host so that we keep executing. Also schedule a - * software timer for the physical timer if it is enabled. 
- */ -void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu) -{ - struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; - struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); - - if (unlikely(!timer->enabled)) - return; -} - void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 132d39ae13d24..14c50d142c675 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -656,7 +656,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) local_irq_disable(); - kvm_timer_flush_hwstate(vcpu); kvm_vgic_flush_hwstate(vcpu); /* From 1c88ab7ec8c53c4d806bb2b6871ddafdebbffa8b Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Fri, 6 Jan 2017 16:07:48 +0100 Subject: [PATCH 18/26] KVM: arm/arm64: Rework kvm_timer_should_fire kvm_timer_should_fire() can be called in two different situations from the kvm_vcpu_block(). The first case is before calling kvm_timer_schedule(), used for wait polling, and in this case the VCPU thread is running and the timer state is loaded onto the hardware so all we have to do is check if the virtual interrupt lines are asserted, becasue the timer interrupt handler functions will raise those lines as appropriate. The second case is inside the wait loop of kvm_vcpu_block(), where we have already called kvm_timer_schedule() and therefore the hardware will be disabled and the software view of the timer state is up to date (timer->loaded is false), and so we can simply check if the timer should fire by looking at the software state. Signed-off-by: Christoffer Dall Reviewed-by: Marc Zyngier --- include/kvm/arm_arch_timer.h | 3 ++- virt/kvm/arm/arch_timer.c | 22 +++++++++++++++++++++- virt/kvm/arm/arm.c | 3 +-- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 2352f3a4e88b0..01ee473517e25 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -78,7 +78,8 @@ int kvm_arm_timer_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_timer_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr); -bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx); +bool kvm_timer_is_pending(struct kvm_vcpu *vcpu); + void kvm_timer_schedule(struct kvm_vcpu *vcpu); void kvm_timer_unschedule(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 53d9bd4a734f7..2035cf2517013 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -49,6 +49,7 @@ static const struct kvm_irq_level default_vtimer_irq = { static bool kvm_timer_irq_can_fire(struct arch_timer_context *timer_ctx); static void kvm_timer_update_irq(struct kvm_vcpu *vcpu, bool new_level, struct arch_timer_context *timer_ctx); +static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx); u64 kvm_phys_timer_read(void) { @@ -226,7 +227,7 @@ static enum hrtimer_restart kvm_phys_timer_expire(struct hrtimer *hrt) return HRTIMER_NORESTART; } -bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) +static bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) { u64 cval, now; @@ -239,6 +240,25 @@ bool kvm_timer_should_fire(struct arch_timer_context *timer_ctx) return cval <= now; } +bool kvm_timer_is_pending(struct kvm_vcpu *vcpu) +{ + struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); + struct arch_timer_context 
*ptimer = vcpu_ptimer(vcpu); + + if (vtimer->irq.level || ptimer->irq.level) + return true; + + /* + * When this is called from withing the wait loop of kvm_vcpu_block(), + * the software view of the timer state is up to date (timer->loaded + * is false), and so we can simply check if the timer should fire now. + */ + if (!vtimer->loaded && kvm_timer_should_fire(vtimer)) + return true; + + return kvm_timer_should_fire(ptimer); +} + /* * Reflect the timer output level into the kvm_run structure */ diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 14c50d142c675..bc126fb99a3d2 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -307,8 +307,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) { - return kvm_timer_should_fire(vcpu_vtimer(vcpu)) || - kvm_timer_should_fire(vcpu_ptimer(vcpu)); + return kvm_timer_is_pending(vcpu); } void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) From 4a2c4da1250d09c1477eff8e14a82b2573481f32 Mon Sep 17 00:00:00 2001 From: Christoffer Dall Date: Sun, 29 Oct 2017 02:04:55 +0100 Subject: [PATCH 19/26] arm/arm64: KVM: Load the timer state when enabling the timer After being lazy with saving/restoring the timer state, we defer that work to vcpu_load and vcpu_put, which ensure that the timer state is loaded on the hardware timers whenever the VCPU runs. Unfortunately, we are failing to do that the first time vcpu_load() runs, because the timer has not yet been enabled at that time. As long as the initialized timer state matches what happens to be in the hardware (a disabled timer, because we never leave the timer screaming), this does not show up as a problem, but is nevertheless incorrect. The solution is simple; disable preemption while setting the timer to be enabled, and call the timer load function when first enabling the timer. Acked-by: Marc Zyngier Signed-off-by: Christoffer Dall --- virt/kvm/arm/arch_timer.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 2035cf2517013..4db54ff08d9e9 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -861,7 +861,11 @@ int kvm_timer_enable(struct kvm_vcpu *vcpu) return ret; no_vgic: + preempt_disable(); timer->enabled = 1; + kvm_timer_vcpu_load_vgic(vcpu); + preempt_enable(); + return 0; } From 0a0d389ea63ced83e0e6da2b78c964b9cb3764ba Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Thu, 26 Oct 2017 17:23:07 +0200 Subject: [PATCH 20/26] KVM: arm/arm64: vgic-its: Remove kvm_its_unmap_device Let's remove kvm_its_unmap_device and use kvm_its_free_device as both functions are identical. Signed-off-by: Eric Auger Acked-by: Marc Zyngier Acked-by: Christoffer Dall Signed-off-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-its.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 9f5e347676284..75c18183eee37 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -897,7 +897,7 @@ static int vgic_its_cmd_handle_mapi(struct kvm *kvm, struct vgic_its *its, } /* Requires the its_lock to be held. */ -static void vgic_its_unmap_device(struct kvm *kvm, struct its_device *device) +static void vgic_its_free_device(struct kvm *kvm, struct its_device *device) { struct its_ite *ite, *temp; @@ -960,7 +960,7 @@ static int vgic_its_cmd_handle_mapd(struct kvm *kvm, struct vgic_its *its, * by removing the mapping and re-establishing it. 
*/ if (device) - vgic_its_unmap_device(kvm, device); + vgic_its_free_device(kvm, device); /* * The spec does not say whether unmapping a not-mapped device @@ -1615,16 +1615,6 @@ static int vgic_its_create(struct kvm_device *dev, u32 type) return vgic_its_set_abi(its, NR_ITS_ABIS - 1); } -static void vgic_its_free_device(struct kvm *kvm, struct its_device *dev) -{ - struct its_ite *ite, *tmp; - - list_for_each_entry_safe(ite, tmp, &dev->itt_head, ite_list) - its_free_ite(kvm, ite); - list_del(&dev->dev_list); - kfree(dev); -} - static void vgic_its_destroy(struct kvm_device *kvm_dev) { struct kvm *kvm = kvm_dev->kvm; From 2f609a03391f24ac31f12f27790194ac49dff832 Mon Sep 17 00:00:00 2001 From: wanghaibin Date: Thu, 26 Oct 2017 17:23:08 +0200 Subject: [PATCH 21/26] KVM: arm/arm64: vgic-its: New helper functions to free the caches We create two new functions that free the device and collection lists. They are currently called by vgic_its_destroy() and other callers will be added in subsequent patches. We also remove the check on its->device_list.next. Lists are initialized in vgic_create_its() and the device is added to the device list only if this latter succeeds. vgic_its_destroy is the device destroy ops. This latter is called by kvm_destroy_devices() which loops on all created devices. So at this point the list is initialized. Acked-by: Marc Zyngier Signed-off-by: wanghaibin Signed-off-by: Eric Auger Signed-off-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-its.c | 41 ++++++++++++++++++------------------ 1 file changed, 20 insertions(+), 21 deletions(-) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 75c18183eee37..d46256c07ba5b 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -913,6 +913,24 @@ static void vgic_its_free_device(struct kvm *kvm, struct its_device *device) kfree(device); } +/* its lock must be held */ +static void vgic_its_free_device_list(struct kvm *kvm, struct vgic_its *its) +{ + struct its_device *cur, *temp; + + list_for_each_entry_safe(cur, temp, &its->device_list, dev_list) + vgic_its_free_device(kvm, cur); +} + +/* its lock must be held */ +static void vgic_its_free_collection_list(struct kvm *kvm, struct vgic_its *its) +{ + struct its_collection *cur, *temp; + + list_for_each_entry_safe(cur, temp, &its->collection_list, coll_list) + vgic_its_free_collection(its, cur->collection_id); +} + /* Must be called with its_lock mutex held */ static struct its_device *vgic_its_alloc_device(struct vgic_its *its, u32 device_id, gpa_t itt_addr, @@ -1619,32 +1637,13 @@ static void vgic_its_destroy(struct kvm_device *kvm_dev) { struct kvm *kvm = kvm_dev->kvm; struct vgic_its *its = kvm_dev->private; - struct list_head *cur, *temp; - - /* - * We may end up here without the lists ever having been initialized. - * Check this and bail out early to avoid dereferencing a NULL pointer. 
- */ - if (!its->device_list.next) - return; mutex_lock(&its->its_lock); - list_for_each_safe(cur, temp, &its->device_list) { - struct its_device *dev; - dev = list_entry(cur, struct its_device, dev_list); - vgic_its_free_device(kvm, dev); - } - - list_for_each_safe(cur, temp, &its->collection_list) { - struct its_collection *coll; + vgic_its_free_device_list(kvm, its); + vgic_its_free_collection_list(kvm, its); - coll = list_entry(cur, struct its_collection, coll_list); - list_del(cur); - kfree(coll); - } mutex_unlock(&its->its_lock); - kfree(its); } From 36d6961c2b481614e8d37495896436d9322df052 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Thu, 26 Oct 2017 17:23:09 +0200 Subject: [PATCH 22/26] KVM: arm/arm64: vgic-its: Free caches when GITS_BASER Valid bit is cleared When the GITS_BASER.Valid gets cleared, the data structures in guest RAM are not valid anymore. The device, collection and LPI lists stored in the in-kernel ITS represent the same information in some form of cache. So let's void the cache. Reviewed-by: Marc Zyngier Reviewed-by: Christoffer Dall Signed-off-by: Eric Auger Signed-off-by: Christoffer Dall --- virt/kvm/arm/vgic/vgic-its.c | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index d46256c07ba5b..1732e08a4375f 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -1431,7 +1431,7 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, unsigned long val) { const struct vgic_its_abi *abi = vgic_its_get_abi(its); - u64 entry_size, device_type; + u64 entry_size, table_type; u64 reg, *regptr, clearbits = 0; /* When GITS_CTLR.Enable is 1, we ignore write accesses. */ @@ -1442,12 +1442,12 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, case 0: regptr = &its->baser_device_table; entry_size = abi->dte_esz; - device_type = GITS_BASER_TYPE_DEVICE; + table_type = GITS_BASER_TYPE_DEVICE; break; case 1: regptr = &its->baser_coll_table; entry_size = abi->cte_esz; - device_type = GITS_BASER_TYPE_COLLECTION; + table_type = GITS_BASER_TYPE_COLLECTION; clearbits = GITS_BASER_INDIRECT; break; default: @@ -1459,10 +1459,24 @@ static void vgic_mmio_write_its_baser(struct kvm *kvm, reg &= ~clearbits; reg |= (entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT; - reg |= device_type << GITS_BASER_TYPE_SHIFT; + reg |= table_type << GITS_BASER_TYPE_SHIFT; reg = vgic_sanitise_its_baser(reg); *regptr = reg; + + if (!(reg & GITS_BASER_VALID)) { + /* Take the its_lock to prevent a race with a save/restore */ + mutex_lock(&its->its_lock); + switch (table_type) { + case GITS_BASER_TYPE_DEVICE: + vgic_its_free_device_list(kvm, its); + break; + case GITS_BASER_TYPE_COLLECTION: + vgic_its_free_collection_list(kvm, its); + break; + } + mutex_unlock(&its->its_lock); + } } static unsigned long vgic_mmio_read_its_ctlr(struct kvm *vcpu, From ae204f80ca4074b6db07bd28c75faf02718735a0 Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Thu, 26 Oct 2017 17:23:10 +0200 Subject: [PATCH 23/26] KVM: arm/arm64: Document KVM_DEV_ARM_ITS_CTRL_RESET At the moment, the in-kernel emulated ITS is not properly reset. On guest restart/reset some registers keep their old values and internal structures like device, ITE, and collection lists are not freed. This may lead to various bugs. Among them, we can have incorrect state backup or failure when saving the ITS state at early guest boot stage. This patch documents a new attribute, KVM_DEV_ARM_ITS_CTRL_RESET in the KVM_DEV_ARM_VGIC_GRP_CTRL group. 
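For reference, a VMM would trigger the reset through the device attribute
API roughly as follows (a minimal sketch; its_fd is assumed to be the file
descriptor returned by KVM_CREATE_DEVICE for the ITS):

    struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_CTRL,
            .attr  = KVM_DEV_ARM_ITS_CTRL_RESET,
            .addr  = 0,     /* no additional parameter */
    };

    if (ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr))
            perror("KVM_DEV_ARM_ITS_CTRL_RESET");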
Upon this action, we can reset registers and especially those pointing to tables previously allocated by the guest and free the internal data structures storing the list of devices, collections and lpis. The usual approach for device reset of having userspace write the reset values of the registers to the kernel via the register read/write APIs doesn't work for the ITS because it has some internal state (caches) which is not exposed as registers, and there is no register interface for "drop cached data without writing it back to RAM". So we need a KVM API which mimics the hardware's reset line, to provide the equivalent behaviour to a "pull the power cord out of the back of the machine" reset. Reviewed-by: Christoffer Dall Reviewed-by: Marc Zyngier Signed-off-by: Eric Auger Reported-by: wanghaibin Signed-off-by: Christoffer Dall --- .../virtual/kvm/devices/arm-vgic-its.txt | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/Documentation/virtual/kvm/devices/arm-vgic-its.txt b/Documentation/virtual/kvm/devices/arm-vgic-its.txt index eb06beb759601..8d5830eab26a6 100644 --- a/Documentation/virtual/kvm/devices/arm-vgic-its.txt +++ b/Documentation/virtual/kvm/devices/arm-vgic-its.txt @@ -33,6 +33,10 @@ Groups: request the initialization of the ITS, no additional parameter in kvm_device_attr.addr. + KVM_DEV_ARM_ITS_CTRL_RESET + reset the ITS, no additional parameter in kvm_device_attr.addr. + See "ITS Reset State" section. + KVM_DEV_ARM_ITS_SAVE_TABLES save the ITS table data into guest RAM, at the location provisioned by the guest in corresponding registers/table entries. @@ -157,3 +161,19 @@ Then vcpus can be started. - pINTID is the physical LPI ID; if zero, it means the entry is not valid and other fields are not meaningful. - ICID is the collection ID + + ITS Reset State: + ---------------- + +RESET returns the ITS to the same state that it was when first created and +initialized. When the RESET command returns, the following things are +guaranteed: + +- The ITS is not enabled and quiescent + GITS_CTLR.Enabled = 0 .Quiescent=1 +- There is no internally cached state +- No collection or device table are used + GITS_BASER.Valid = 0 +- GITS_CBASER = 0, GITS_CREADR = 0, GITS_CWRITER = 0 +- The ABI version is unchanged and remains the one set when the ITS + device was first created. From 3eb4271b4ab6d38a3c113a19f358f606702e08ef Mon Sep 17 00:00:00 2001 From: Eric Auger Date: Thu, 26 Oct 2017 17:23:11 +0200 Subject: [PATCH 24/26] KVM: arm/arm64: vgic-its: Implement KVM_DEV_ARM_ITS_CTRL_RESET On reset we clear the valid bits of GITS_CBASER and GITS_BASER. We also clear command queue registers and free the cache (device, collection, and lpi lists). As we need to take the same locks as save/restore functions, we create a vgic_its_ctrl() wrapper that handles KVM_DEV_ARM_VGIC_GRP_CTRL group functions. 
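The resulting wrapper keeps a single locking order for all three control
operations; schematically (condensed from the vgic_its_ctrl() added below):

    mutex_lock(&kvm->lock);
    mutex_lock(&its->its_lock);

    if (!lock_all_vcpus(kvm)) {
            /* drop both mutexes and return -EBUSY */
    }

    switch (attr) {
    case KVM_DEV_ARM_ITS_CTRL_RESET:
            vgic_its_reset(kvm, its);
            break;
    case KVM_DEV_ARM_ITS_SAVE_TABLES:
            ret = abi->save_tables(its);
            break;
    case KVM_DEV_ARM_ITS_RESTORE_TABLES:
            ret = abi->restore_tables(its);
            break;
    }

    unlock_all_vcpus(kvm);
    mutex_unlock(&its->its_lock);
    mutex_unlock(&kvm->lock);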
Reviewed-by: Christoffer Dall Reviewed-by: Marc Zyngier Signed-off-by: Eric Auger Signed-off-by: Christoffer Dall --- arch/arm/include/uapi/asm/kvm.h | 1 + arch/arm64/include/uapi/asm/kvm.h | 1 + virt/kvm/arm/vgic/vgic-its.c | 105 ++++++++++++++++-------------- 3 files changed, 58 insertions(+), 49 deletions(-) diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h index 665c454e50d0a..b56895593c840 100644 --- a/arch/arm/include/uapi/asm/kvm.h +++ b/arch/arm/include/uapi/asm/kvm.h @@ -221,6 +221,7 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 +#define KVM_DEV_ARM_ITS_CTRL_RESET 4 /* KVM_IRQ_LINE irq field index values */ #define KVM_ARM_IRQ_TYPE_SHIFT 24 diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h index 0004feef7cc29..37ca7394549cf 100644 --- a/arch/arm64/include/uapi/asm/kvm.h +++ b/arch/arm64/include/uapi/asm/kvm.h @@ -233,6 +233,7 @@ struct kvm_arch_memory_slot { #define KVM_DEV_ARM_ITS_SAVE_TABLES 1 #define KVM_DEV_ARM_ITS_RESTORE_TABLES 2 #define KVM_DEV_ARM_VGIC_SAVE_PENDING_TABLES 3 +#define KVM_DEV_ARM_ITS_CTRL_RESET 4 /* Device Control API on vcpu fd */ #define KVM_ARM_VCPU_PMU_V3_CTRL 0 diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c index 1732e08a4375f..40791c1217105 100644 --- a/virt/kvm/arm/vgic/vgic-its.c +++ b/virt/kvm/arm/vgic/vgic-its.c @@ -2273,29 +2273,13 @@ static int vgic_its_restore_collection_table(struct vgic_its *its) */ static int vgic_its_save_tables_v0(struct vgic_its *its) { - struct kvm *kvm = its->dev->kvm; int ret; - mutex_lock(&kvm->lock); - mutex_lock(&its->its_lock); - - if (!lock_all_vcpus(kvm)) { - mutex_unlock(&its->its_lock); - mutex_unlock(&kvm->lock); - return -EBUSY; - } - ret = vgic_its_save_device_tables(its); if (ret) - goto out; - - ret = vgic_its_save_collection_table(its); + return ret; -out: - unlock_all_vcpus(kvm); - mutex_unlock(&its->its_lock); - mutex_unlock(&kvm->lock); - return ret; + return vgic_its_save_collection_table(its); } /** @@ -2305,29 +2289,13 @@ static int vgic_its_save_tables_v0(struct vgic_its *its) */ static int vgic_its_restore_tables_v0(struct vgic_its *its) { - struct kvm *kvm = its->dev->kvm; int ret; - mutex_lock(&kvm->lock); - mutex_lock(&its->its_lock); - - if (!lock_all_vcpus(kvm)) { - mutex_unlock(&its->its_lock); - mutex_unlock(&kvm->lock); - return -EBUSY; - } - ret = vgic_its_restore_collection_table(its); if (ret) - goto out; - - ret = vgic_its_restore_device_tables(its); -out: - unlock_all_vcpus(kvm); - mutex_unlock(&its->its_lock); - mutex_unlock(&kvm->lock); + return ret; - return ret; + return vgic_its_restore_device_tables(its); } static int vgic_its_commit_v0(struct vgic_its *its) @@ -2346,6 +2314,19 @@ static int vgic_its_commit_v0(struct vgic_its *its) return 0; } +static void vgic_its_reset(struct kvm *kvm, struct vgic_its *its) +{ + /* We need to keep the ABI specific field values */ + its->baser_coll_table &= ~GITS_BASER_VALID; + its->baser_device_table &= ~GITS_BASER_VALID; + its->cbaser = 0; + its->creadr = 0; + its->cwriter = 0; + its->enabled = 0; + vgic_its_free_device_list(kvm, its); + vgic_its_free_collection_list(kvm, its); +} + static int vgic_its_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr) { @@ -2360,6 +2341,8 @@ static int vgic_its_has_attr(struct kvm_device *dev, switch (attr->attr) { case KVM_DEV_ARM_VGIC_CTRL_INIT: return 0; + case KVM_DEV_ARM_ITS_CTRL_RESET: + return 
From 74a64a981662ab34289b3c90f6f964aa38ec1d9f Mon Sep 17 00:00:00 2001
From: Marc Zyngier 
Date: Sun, 29 Oct 2017 02:18:09 +0000
Subject: [PATCH 25/26] KVM: arm/arm64: Unify 32bit fault injection

Both arm and arm64 are capable of injecting faults, and yet have
completely divergent implementations, leading to different bugs and
reduced maintainability.

Let's elect the arm64 version as the canonical one and move it into
aarch32.c, which is common to both architectures.
Reviewed-by: Christoffer Dall 
Signed-off-by: Marc Zyngier 
Signed-off-by: Christoffer Dall 
---
 arch/arm/include/asm/kvm_emulate.h   |  36 ++++++-
 arch/arm/kvm/emulate.c               | 139 ---------------------------
 arch/arm64/include/asm/kvm_emulate.h |   3 +
 arch/arm64/kvm/inject_fault.c        |  74 +-------------
 virt/kvm/arm/aarch32.c               |  97 ++++++++++++++++++-
 5 files changed, 131 insertions(+), 218 deletions(-)

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 98089ffd91bb6..dcae3970148d0 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -25,7 +25,22 @@
 #include
 #include
 
+/* arm64 compatibility macros */
+#define COMPAT_PSR_MODE_ABT	ABT_MODE
+#define COMPAT_PSR_MODE_UND	UND_MODE
+#define COMPAT_PSR_T_BIT	PSR_T_BIT
+#define COMPAT_PSR_I_BIT	PSR_I_BIT
+#define COMPAT_PSR_A_BIT	PSR_A_BIT
+#define COMPAT_PSR_E_BIT	PSR_E_BIT
+#define COMPAT_PSR_IT_MASK	PSR_IT_MASK
+
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+
+static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
+{
+	return vcpu_reg(vcpu, reg_num);
+}
+
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
 static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
@@ -42,10 +57,25 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
 
-void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_undef32(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+	kvm_inject_undef32(vcpu);
+}
+
+static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	kvm_inject_dabt32(vcpu, addr);
+}
+
+static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	kvm_inject_pabt32(vcpu, addr);
+}
 
 static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 0064b86a2c879..cdff963f133a1 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -165,145 +165,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
  * Inject exceptions into the guest
  */
 
-static u32 exc_vector_base(struct kvm_vcpu *vcpu)
-{
-	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-	u32 vbar = vcpu_cp15(vcpu, c12_VBAR);
-
-	if (sctlr & SCTLR_V)
-		return 0xffff0000;
-	else /* always have security exceptions */
-		return vbar;
-}
-
-/*
- * Switch to an exception mode, updating both CPSR and SPSR.  Follow
- * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
- */
-static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
-{
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-
-	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
-
-	switch (mode) {
-	case FIQ_MODE:
-		*vcpu_cpsr(vcpu) |= PSR_F_BIT;
-		/* Fall through */
-	case ABT_MODE:
-	case IRQ_MODE:
-		*vcpu_cpsr(vcpu) |= PSR_A_BIT;
-		/* Fall through */
-	default:
-		*vcpu_cpsr(vcpu) |= PSR_I_BIT;
-	}
-
-	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-	if (sctlr & SCTLR_TE)
-		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
-	if (sctlr & SCTLR_EE)
-		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-	/* Note: These now point to the mode banked copies */
-	*vcpu_spsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_inject_undefined - inject an undefined exception into the guest
- * @vcpu: The VCPU to receive the undefined exception
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- *
- * Modelled after TakeUndefInstrException() pseudocode.
- */
-void kvm_inject_undefined(struct kvm_vcpu *vcpu)
-{
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_thumb = (cpsr & PSR_T_BIT);
-	u32 vect_offset = 4;
-	u32 return_offset = (is_thumb) ? 2 : 4;
-
-	kvm_update_psr(vcpu, UND_MODE);
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
-
-	/* Branch to exception vector */
-	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
-}
-
-/*
- * Modelled after TakeDataAbortException() and TakePrefetchAbortException
- * pseudocode.
- */
-static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
-{
-	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	bool is_thumb = (cpsr & PSR_T_BIT);
-	u32 vect_offset;
-	u32 return_offset = (is_thumb) ? 4 : 0;
-	bool is_lpae;
-
-	kvm_update_psr(vcpu, ABT_MODE);
-	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
-
-	if (is_pabt)
-		vect_offset = 12;
-	else
-		vect_offset = 16;
-
-	/* Branch to exception vector */
-	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
-
-	if (is_pabt) {
-		/* Set IFAR and IFSR */
-		vcpu_cp15(vcpu, c6_IFAR) = addr;
-		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-		/* Always give debug fault for now - should give guest a clue */
-		if (is_lpae)
-			vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
-		else
-			vcpu_cp15(vcpu, c5_IFSR) = 2;
-	} else { /* !iabt */
-		/* Set DFAR and DFSR */
-		vcpu_cp15(vcpu, c6_DFAR) = addr;
-		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-		/* Always give debug fault for now - should give guest a clue */
-		if (is_lpae)
-			vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
-		else
-			vcpu_cp15(vcpu, c5_DFSR) = 2;
-	}
-
-}
-
-/**
- * kvm_inject_dabt - inject a data abort into the guest
- * @vcpu: The VCPU to receive the undefined exception
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-	inject_abt(vcpu, false, addr);
-}
-
-/**
- * kvm_inject_pabt - inject a prefetch abort into the guest
- * @vcpu: The VCPU to receive the undefined exception
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-	inject_abt(vcpu, true, addr);
-}
-
 /**
  * kvm_inject_vabt - inject an async abort / SError into the guest
  * @vcpu: The VCPU to receive the exception
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index e5df3fce00082..bf61da0ef82b7 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_undef32(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cfa54a08..8ecbcb40e3179 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,74 +33,6 @@
 #define LOWER_EL_AArch64_VECTOR		0x400
 #define LOWER_EL_AArch32_VECTOR		0x600
 
-static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
-{
-	unsigned long cpsr;
-	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
-	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
-	u32 return_offset = (is_thumb) ? 4 : 0;
-	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-
-	cpsr = mode | COMPAT_PSR_I_BIT;
-
-	if (sctlr & (1 << 30))
-		cpsr |= COMPAT_PSR_T_BIT;
-	if (sctlr & (1 << 25))
-		cpsr |= COMPAT_PSR_E_BIT;
-
-	*vcpu_cpsr(vcpu) = cpsr;
-
-	/* Note: These now point to the banked copies */
-	*vcpu_spsr(vcpu) = new_spsr_value;
-	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
-
-	/* Branch to exception vector */
-	if (sctlr & (1 << 13))
-		vect_offset += 0xffff0000;
-	else /* always have security exceptions */
-		vect_offset += vcpu_cp15(vcpu, c12_VBAR);
-
-	*vcpu_pc(vcpu) = vect_offset;
-}
-
-static void inject_undef32(struct kvm_vcpu *vcpu)
-{
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
-}
-
-/*
- * Modelled after TakeDataAbortException() and TakePrefetchAbortException
- * pseudocode.
- */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
-			 unsigned long addr)
-{
-	u32 vect_offset;
-	u32 *far, *fsr;
-	bool is_lpae;
-
-	if (is_pabt) {
-		vect_offset = 12;
-		far = &vcpu_cp15(vcpu, c6_IFAR);
-		fsr = &vcpu_cp15(vcpu, c5_IFSR);
-	} else { /* !iabt */
-		vect_offset = 16;
-		far = &vcpu_cp15(vcpu, c6_DFAR);
-		fsr = &vcpu_cp15(vcpu, c5_DFSR);
-	}
-
-	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
-
-	*far = addr;
-
-	/* Give the guest an IMPLEMENTATION DEFINED exception */
-	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-	if (is_lpae)
-		*fsr = 1 << 9 | 0x34;
-	else
-		*fsr = 0x14;
-}
-
 enum exception_type {
 	except_type_sync	= 0,
 	except_type_irq		= 0x80,
@@ -197,7 +129,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_abt32(vcpu, false, addr);
+		kvm_inject_dabt32(vcpu, addr);
 	else
 		inject_abt64(vcpu, false, addr);
 }
@@ -213,7 +145,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_abt32(vcpu, true, addr);
+		kvm_inject_pabt32(vcpu, addr);
 	else
 		inject_abt64(vcpu, true, addr);
 }
@@ -227,7 +159,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
 	if (!(vcpu->arch.hcr_el2 & HCR_RW))
-		inject_undef32(vcpu);
+		kvm_inject_undef32(vcpu);
 	else
 		inject_undef64(vcpu);
 }
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index 79c7c357804bc..8bc479fa37e65 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -25,11 +25,6 @@
 #include
 #include
 
-#ifndef CONFIG_ARM64
-#define COMPAT_PSR_T_BIT	PSR_T_BIT
-#define COMPAT_PSR_IT_MASK	PSR_IT_MASK
-#endif
-
 /*
  * stolen from arch/arm/kernel/opcodes.c
  *
@@ -150,3 +145,95 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
 		*vcpu_pc(vcpu) += 4;
 	kvm_adjust_itstate(vcpu);
 }
+
+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+	[0] = { 0, 0 },		/* Reset, unused */
+	[1] = { 4, 2 },		/* Undefined */
+	[2] = { 0, 0 },		/* SVC, unused */
+	[3] = { 4, 4 },		/* Prefetch abort */
+	[4] = { 8, 8 },		/* Data abort */
+	[5] = { 0, 0 },		/* HVC, unused */
+	[6] = { 4, 4 },		/* IRQ, unused */
+	[7] = { 4, 4 },		/* FIQ, unused */
+};
+
+static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+{
+	unsigned long cpsr;
+	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
+	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+
+	cpsr = mode | COMPAT_PSR_I_BIT;
+
+	if (sctlr & (1 << 30))
+		cpsr |= COMPAT_PSR_T_BIT;
+	if (sctlr & (1 << 25))
+		cpsr |= COMPAT_PSR_E_BIT;
+
+	*vcpu_cpsr(vcpu) = cpsr;
+
+	/* Note: These now point to the banked copies */
+	*vcpu_spsr(vcpu) = new_spsr_value;
+	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+	/* Branch to exception vector */
+	if (sctlr & (1 << 13))
+		vect_offset += 0xffff0000;
+	else /* always have security exceptions */
+		vect_offset += vcpu_cp15(vcpu, c12_VBAR);
+
+	*vcpu_pc(vcpu) = vect_offset;
+}
+
+void kvm_inject_undef32(struct kvm_vcpu *vcpu)
+{
+	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException
+ * pseudocode.
+ */
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+			 unsigned long addr)
+{
+	u32 vect_offset;
+	u32 *far, *fsr;
+	bool is_lpae;
+
+	if (is_pabt) {
+		vect_offset = 12;
+		far = &vcpu_cp15(vcpu, c6_IFAR);
+		fsr = &vcpu_cp15(vcpu, c5_IFSR);
+	} else { /* !iabt */
+		vect_offset = 16;
+		far = &vcpu_cp15(vcpu, c6_DFAR);
+		fsr = &vcpu_cp15(vcpu, c5_DFSR);
+	}
+
+	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+
+	*far = addr;
+
+	/* Give the guest an IMPLEMENTATION DEFINED exception */
+	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
+	if (is_lpae)
+		*fsr = 1 << 9 | 0x34;
+	else
+		*fsr = 0x14;
+}
+
+void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	inject_abt32(vcpu, false, addr);
+}
+
+void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+	inject_abt32(vcpu, true, addr);
+}
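
As a worked example of the table above (not part of the patch): a 32-bit data abort uses vector offset 16, so the shared prepare_fault32() helper looks up return_offsets[16 >> 2] = { 8, 8 } and banks LR_abt as the guest PC at injection time plus 8 in both ARM and Thumb state. The hypothetical standalone snippet below simply mirrors that lookup:

#include <stdio.h>

/* Mirror of the return_offsets[] lookup performed by prepare_fault32();
 * values copied from the table introduced in this patch. */
static const unsigned char return_offsets[8][2] = {
	{ 0, 0 }, { 4, 2 }, { 0, 0 }, { 4, 4 },
	{ 8, 8 }, { 0, 0 }, { 4, 4 }, { 4, 4 },
};

int main(void)
{
	unsigned int vect_offset = 16;	/* data abort vector */
	int is_thumb = 0;		/* guest was executing ARM code */

	printf("LR offset applied on injection: +%u\n",
	       return_offsets[vect_offset >> 2][is_thumb]);
	return 0;
}
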
From a2b83133339067c1b27f902e32506ab2871e2320 Mon Sep 17 00:00:00 2001
From: Dongjiu Geng 
Date: Mon, 30 Oct 2017 14:05:18 +0800
Subject: [PATCH 26/26] KVM: arm/arm64: fix the incompatible matching for external abort

kvm_vcpu_dabt_isextabt() tries to match a full fault syndrome, but
calls kvm_vcpu_trap_get_fault_type() that only returns the fault class,
thus reducing the scope of the check. This doesn't cause any observable
bug yet as we end up matching a closely related syndrome for which we
return the same value.

Using kvm_vcpu_trap_get_fault() instead fixes it for good.

Signed-off-by: Dongjiu Geng 
Acked-by: Marc Zyngier 
Signed-off-by: Christoffer Dall 
---
 arch/arm/include/asm/kvm_emulate.h   | 2 +-
 arch/arm64/include/asm/kvm_emulate.h | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index dcae3970148d0..3d22eb87f919a 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -233,7 +233,7 @@ static inline u8 kvm_vcpu_trap_get_fault_type(struct kvm_vcpu *vcpu)
 
 static inline bool kvm_vcpu_dabt_isextabt(struct kvm_vcpu *vcpu)
 {
-	switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
 	case FSC_SEA_TTW0:
 	case FSC_SEA_TTW1:
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index bf61da0ef82b7..5f28dfa14cee2 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -240,7 +240,7 @@ static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
 
 static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
 {
-	switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
+	switch (kvm_vcpu_trap_get_fault(vcpu)) {
 	case FSC_SEA:
 	case FSC_SEA_TTW0:
 	case FSC_SEA_TTW1: