KVM: arm/arm64: vgic-new: Add IRQ sync/flush framework
Implement the framework for syncing IRQs between our emulation and
the list registers, which represent the guest's view of IRQs.
This is done in kvm_vgic_flush_hwstate and kvm_vgic_sync_hwstate,
which are called on guest entry and exit, respectively.
The code talking to the actual GICv2/v3 hardware is added in the
following patches.
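
As a point of reference, here is a minimal sketch of how the two hooks
are meant to bracket a guest run. This is illustrative only and not part
of this patch: the function name is made up, and the real call sites are
wired up in the arch run loop.

/* Illustrative sketch: flush before entry, sync after exit. */
static void vcpu_run_once(struct kvm_vcpu *vcpu)
{
        /* Push the emulated IRQ state into the list registers. */
        kvm_vgic_flush_hwstate(vcpu);

        /* ... enter the guest and run until the next exit ... */

        /* Fold the hardware state back into the emulation. */
        kvm_vgic_sync_hwstate(vcpu);
}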

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Eric Auger <eric.auger@linaro.org>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Eric Auger <eric.auger@linaro.org>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Marc Zyngier authored and Christoffer Dall committed May 20, 2016
1 parent 8e44474 commit 0919e84
Showing 3 changed files with 198 additions and 0 deletions.
4 changes: 4 additions & 0 deletions include/kvm/vgic/vgic.h
@@ -190,6 +190,10 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
#define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \
                             ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS))

bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu);
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);

/**
 * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
 *
192 changes: 192 additions & 0 deletions virt/kvm/arm/vgic/vgic.c
@@ -307,3 +307,195 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int intid,
{
        return vgic_update_irq_pending(kvm, cpuid, intid, level, false);
}

/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;

retry:
        spin_lock(&vgic_cpu->ap_list_lock);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

                spin_lock(&irq->irq_lock);

                BUG_ON(vcpu != irq->vcpu);

                target_vcpu = vgic_target_oracle(irq);

                if (!target_vcpu) {
                        /*
                         * We don't need to process this interrupt any
                         * further, move it off the list.
                         */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        spin_unlock(&irq->irq_lock);
                        continue;
                }

                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
                        spin_unlock(&irq->irq_lock);
                        continue;
                }

                /* This interrupt looks like it has to be migrated. */

                spin_unlock(&irq->irq_lock);
                spin_unlock(&vgic_cpu->ap_list_lock);

                /*
                 * Ensure locking order by always locking the smallest
                 * ID first.
                 */
                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                        vcpuA = vcpu;
                        vcpuB = target_vcpu;
                } else {
                        vcpuA = target_vcpu;
                        vcpuB = vcpu;
                }

                spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                 SINGLE_DEPTH_NESTING);
                spin_lock(&irq->irq_lock);

                /*
                 * If the affinity has been preserved, move the
                 * interrupt around. Otherwise, it means things have
                 * changed while the interrupt was unlocked, and we
                 * need to replay this.
                 *
                 * In all cases, we cannot trust the list not to have
                 * changed, so we restart from the beginning.
                 */
                if (target_vcpu == vgic_target_oracle(irq)) {
                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                }

                spin_unlock(&irq->irq_lock);
                spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                goto retry;
        }

        spin_unlock(&vgic_cpu->ap_list_lock);
}
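
The locking order used above, always taking the lock of the smaller
vcpu_id first, is the standard deadlock-avoidance idiom for acquiring
two locks of the same class. A standalone userspace sketch of the same
idea, assuming pthreads (all names here are illustrative, not kernel
API):

#include <pthread.h>

struct ctx {
        int id;
        pthread_mutex_t lock;
};

/*
 * Two threads that each need both locks always acquire them in the
 * same global order (smallest id first), so neither can end up holding
 * one lock while waiting forever for the other.
 */
static void lock_both(struct ctx *a, struct ctx *b)
{
        struct ctx *first = (a->id < b->id) ? a : b;
        struct ctx *second = (a->id < b->id) ? b : a;

        pthread_mutex_lock(&first->lock);
        pthread_mutex_lock(&second->lock);
}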

static inline void vgic_process_maintenance_interrupt(struct kvm_vcpu *vcpu)
{
}

static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
}

/* Requires the irq_lock to be held. */
static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
{
        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
}

static inline void vgic_clear_lr(struct kvm_vcpu *vcpu, int lr)
{
}

static inline void vgic_set_underflow(struct kvm_vcpu *vcpu)
{
}

/* Requires the ap_list_lock to be held. */
static int compute_ap_list_depth(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                if (vgic_irq_is_sgi(irq->intid) && irq->source)
                        count += hweight8(irq->source);
                else
                        count++;
                spin_unlock(&irq->irq_lock);
        }
        return count;
}
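
A worked example of the SGI accounting above, using __builtin_popcount
as a userspace stand-in for the kernel's hweight8() (the program and
the example value are illustrative):

#include <stdio.h>

int main(void)
{
        /* A GICv2 SGI pending from vcpu0 and vcpu2: bits 0 and 2 set. */
        unsigned char source = 0x05;

        /* Each source needs its own list register, so this SGI counts
         * for two entries, while any other interrupt counts for one. */
        printf("LRs needed: %d\n", __builtin_popcount(source)); /* prints 2 */
        return 0;
}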

/* Requires the VCPU's ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));

        if (compute_ap_list_depth(vcpu) > kvm_vgic_global_state.nr_lr) {
                vgic_set_underflow(vcpu);
                vgic_sort_ap_list(vcpu);
        }

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                spin_lock(&irq->irq_lock);

                if (unlikely(vgic_target_oracle(irq) != vcpu))
                        goto next;

                /*
                 * If we get an SGI with multiple sources, try to get
                 * them in all at once.
                 */
                do {
                        vgic_populate_lr(vcpu, irq, count++);
                } while (irq->source && count < kvm_vgic_global_state.nr_lr);

next:
                spin_unlock(&irq->irq_lock);

                if (count == kvm_vgic_global_state.nr_lr)
                        break;
        }

        vcpu->arch.vgic_cpu.used_lrs = count;

        /* Nuke remaining LRs */
        for ( ; count < kvm_vgic_global_state.nr_lr; count++)
                vgic_clear_lr(vcpu, count);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        vgic_process_maintenance_interrupt(vcpu);
        vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
        vgic_flush_lr_state(vcpu);
        spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
}
2 changes: 2 additions & 0 deletions virt/kvm/arm/vgic/vgic.h
@@ -16,6 +16,8 @@
#ifndef __KVM_ARM_VGIC_NEW_H__
#define __KVM_ARM_VGIC_NEW_H__

#define vgic_irq_is_sgi(intid) ((intid) < VGIC_NR_SGIS)

struct vgic_irq *vgic_get_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                              u32 intid);
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq);
