Skip to content

Commit

Permalink
KVM: arm64: vgic-v4: Move the GICv4 residency flow to be driven by vcpu_load/put

Browse files Browse the repository at this point in the history

When the VHE code was reworked, a lot of the vgic stuff was moved around,
but the GICv4 residency code did stay untouched, meaning that we come
in and out of residency on each flush/sync, which is obviously suboptimal.

To address this, let's move things around a bit:

- Residency entry (flush) moves to vcpu_load
- Residency exit (sync) moves to vcpu_put
- On blocking (entry to WFI), we "put"
- On unblocking (exit from WFI), we "load"

Because these can nest (load/block/put/load/unblock/put, for example),
we now have per-VPE tracking of the residency state.

Additionally, vgic_v4_put gains a "need doorbell" parameter, which only
gets set to true when blocking because of a WFI. This allows a finer
control of the doorbell, which now also gets disabled as soon as
it gets signaled.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20191027144234.8395-2-maz@kernel.org
  • Loading branch information
Marc Zyngier committed Oct 28, 2019
1 parent 5c40130 commit 8e01d9a
Show file tree
Hide file tree
Showing 8 changed files with 48 additions and 42 deletions.
7 changes: 6 additions & 1 deletion drivers/irqchip/irq-gic-v4.c
Original file line number Diff line number Diff line change
Expand Up @@ -141,12 +141,17 @@ static int its_send_vpe_cmd(struct its_vpe *vpe, struct its_cmd_info *info)
int its_schedule_vpe(struct its_vpe *vpe, bool on)
{
struct its_cmd_info info;
int ret;

WARN_ON(preemptible());

info.cmd_type = on ? SCHEDULE_VPE : DESCHEDULE_VPE;

return its_send_vpe_cmd(vpe, &info);
ret = its_send_vpe_cmd(vpe, &info);
if (!ret)
vpe->resident = on;

return ret;
}

int its_invall_vpe(struct its_vpe *vpe)
Expand Down
4 changes: 2 additions & 2 deletions include/kvm/arm_vgic.h
Original file line number Diff line number Diff line change
Expand Up @@ -396,7 +396,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);

void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
int vgic_v4_load(struct kvm_vcpu *vcpu);
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);

#endif /* __KVM_ARM_VGIC_H */
2 changes: 2 additions & 0 deletions include/linux/irqchip/arm-gic-v4.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,8 @@ struct its_vpe {
/* Doorbell interrupt */
int irq;
irq_hw_number_t vpe_db_lpi;
/* VPE resident */
bool resident;
/* VPE proxy mapping */
int vpe_proxy_event;
/*
Expand Down
12 changes: 8 additions & 4 deletions virt/kvm/arm/arm.c
Original file line number Diff line number Diff line change
Expand Up @@ -322,20 +322,24 @@ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
/*
* If we're about to block (most likely because we've just hit a
* WFI), we need to sync back the state of the GIC CPU interface
* so that we have the lastest PMR and group enables. This ensures
* so that we have the latest PMR and group enables. This ensures
* that kvm_arch_vcpu_runnable has up-to-date data to decide
* whether we have pending interrupts.
*
* For the same reason, we want to tell GICv4 that we need
* doorbells to be signalled, should an interrupt become pending.
*/
preempt_disable();
kvm_vgic_vmcr_sync(vcpu);
vgic_v4_put(vcpu, true);
preempt_enable();

kvm_vgic_v4_enable_doorbell(vcpu);
}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
kvm_vgic_v4_disable_doorbell(vcpu);
preempt_disable();
vgic_v4_load(vcpu);
preempt_enable();
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
Expand Down
4 changes: 4 additions & 0 deletions virt/kvm/arm/vgic/vgic-v3.c
Original file line number Diff line number Diff line change
Expand Up @@ -664,6 +664,8 @@ void vgic_v3_load(struct kvm_vcpu *vcpu)

if (has_vhe())
__vgic_v3_activate_traps(vcpu);

WARN_ON(vgic_v4_load(vcpu));
}

void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)
Expand All @@ -676,6 +678,8 @@ void vgic_v3_vmcr_sync(struct kvm_vcpu *vcpu)

void vgic_v3_put(struct kvm_vcpu *vcpu)
{
WARN_ON(vgic_v4_put(vcpu, false));

vgic_v3_vmcr_sync(vcpu);

kvm_call_hyp(__vgic_v3_save_aprs, vcpu);
Expand Down
55 changes: 26 additions & 29 deletions virt/kvm/arm/vgic/vgic-v4.c
Original file line number Diff line number Diff line change
Expand Up @@ -85,6 +85,10 @@ static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
struct kvm_vcpu *vcpu = info;

/* We got the message, no need to fire again */
if (!irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
disable_irq_nosync(irq);

vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
kvm_vcpu_kick(vcpu);
Expand Down Expand Up @@ -192,20 +196,30 @@ void vgic_v4_teardown(struct kvm *kvm)
its_vm->vpes = NULL;
}

int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu)
int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db)
{
if (!vgic_supports_direct_msis(vcpu->kvm))
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
struct irq_desc *desc = irq_to_desc(vpe->irq);

if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
return 0;

return its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, false);
/*
* If blocking, a doorbell is required. Undo the nested
* disable_irq() calls...
*/
while (need_db && irqd_irq_disabled(&desc->irq_data))
enable_irq(vpe->irq);

return its_schedule_vpe(vpe, false);
}

int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
int vgic_v4_load(struct kvm_vcpu *vcpu)
{
int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
int err;

if (!vgic_supports_direct_msis(vcpu->kvm))
if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
return 0;

/*
Expand All @@ -214,21 +228,22 @@ int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu)
* doc in drivers/irqchip/irq-gic-v4.c to understand how this
* turns into a VMOVP command at the ITS level.
*/
err = irq_set_affinity(irq, cpumask_of(smp_processor_id()));
err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
if (err)
return err;

err = its_schedule_vpe(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe, true);
/* Disable the doorbell, as we're about to enter the guest */
disable_irq_nosync(vpe->irq);

err = its_schedule_vpe(vpe, true);
if (err)
return err;

/*
* Now that the VPE is resident, let's get rid of a potential
* doorbell interrupt that would still be pending.
*/
err = irq_set_irqchip_state(irq, IRQCHIP_STATE_PENDING, false);

return err;
return irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);
}

static struct vgic_its *vgic_get_its(struct kvm *kvm,
Expand Down Expand Up @@ -335,21 +350,3 @@ int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int virq,
mutex_unlock(&its->its_lock);
return ret;
}

void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu)
{
if (vgic_supports_direct_msis(vcpu->kvm)) {
int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
if (irq)
enable_irq(irq);
}
}

void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu)
{
if (vgic_supports_direct_msis(vcpu->kvm)) {
int irq = vcpu->arch.vgic_cpu.vgic_v3.its_vpe.irq;
if (irq)
disable_irq(irq);
}
}
4 changes: 0 additions & 4 deletions virt/kvm/arm/vgic/vgic.c
Original file line number Diff line number Diff line change
Expand Up @@ -857,8 +857,6 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

WARN_ON(vgic_v4_sync_hwstate(vcpu));

/* An empty ap_list_head implies used_lrs == 0 */
if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
return;
Expand All @@ -882,8 +880,6 @@ static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
WARN_ON(vgic_v4_flush_hwstate(vcpu));

/*
* If there are no virtual interrupts active or pending for this
* VCPU, then there is no work to do and we can bail out without
Expand Down
2 changes: 0 additions & 2 deletions virt/kvm/arm/vgic/vgic.h
Original file line number Diff line number Diff line change
Expand Up @@ -316,7 +316,5 @@ void vgic_its_invalidate_cache(struct kvm *kvm);
bool vgic_supports_direct_msis(struct kvm *kvm);
int vgic_v4_init(struct kvm *kvm);
void vgic_v4_teardown(struct kvm *kvm);
int vgic_v4_sync_hwstate(struct kvm_vcpu *vcpu);
int vgic_v4_flush_hwstate(struct kvm_vcpu *vcpu);

#endif

0 comments on commit 8e01d9a

Please sign in to comment.