KVM: arm/arm64: Fix VMID alloc race by reverting to lock-less
commit fb544d1 upstream.

We recently addressed a VMID generation race by introducing a read/write
lock around accesses and updates to the vmid generation values.

However, kvm_arch_vcpu_ioctl_run() also calls need_new_vmid_gen() but
does so without taking the read lock.

As far as I can tell, this can lead to the same kind of race:

  VM 0, VCPU 0			VM 0, VCPU 1
  ------------			------------
  update_vttbr (vmid 254)
  				update_vttbr (vmid 1) // roll over
				read_lock(kvm_vmid_lock);
				force_vm_exit()
  local_irq_disable
  need_new_vmid_gen == false //because vmid gen matches

  enter_guest (vmid 254)
  				kvm_arch.vttbr = <PGD>:<VMID 1>
				read_unlock(kvm_vmid_lock);

  				enter_guest (vmid 1)

This results in two VCPUs of the same VM running with different VMIDs,
and (even worse) VCPUs of other VMs could now be allocated the clashing
VMID 254 from the new generation for as long as VCPU 0 has not exited.

Attempt to solve this by making sure vttbr is updated before another CPU
can observe the updated VMID generation.
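
Put differently, the write side must publish the new vttbr before the
generation value that advertises it, and the read side must check the
generations before trusting the translation state. The following is a
minimal user-space sketch of that pairing; the names are illustrative
models of the kernel fields (not the kernel's API), and C11 fences
stand in for smp_wmb()/smp_rmb():

  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  static _Atomic uint64_t kvm_vmid_gen_model = 1; /* models kvm_vmid_gen */

  struct vm_model {               /* models the relevant kvm->arch fields */
          uint64_t vttbr;
          _Atomic uint64_t vmid_gen;
  };

  /* Write side: mirrors the tail of the patched update_vttbr(). */
  static void publish_vttbr(struct vm_model *vm, uint64_t new_vttbr)
  {
          vm->vttbr = new_vttbr;
          /* smp_wmb() analogue: a reader that sees the new generation
           * below is guaranteed to also see the new vttbr. */
          atomic_thread_fence(memory_order_release);
          atomic_store_explicit(&vm->vmid_gen,
                          atomic_load_explicit(&kvm_vmid_gen_model,
                                               memory_order_relaxed),
                          memory_order_relaxed);
  }

  /* Read side: mirrors the patched need_new_vmid_gen(). */
  static int need_new_vmid_gen_model(struct vm_model *vm)
  {
          uint64_t current_gen =
                  atomic_load_explicit(&kvm_vmid_gen_model,
                                       memory_order_relaxed);
          /* smp_rmb() analogue: order this load against the next one. */
          atomic_thread_fence(memory_order_acquire);
          return atomic_load_explicit(&vm->vmid_gen, memory_order_relaxed)
                          != current_gen;
  }

  int main(void)
  {
          struct vm_model vm = { .vttbr = 0 };

          printf("stale before publish: %d\n", need_new_vmid_gen_model(&vm));
          publish_vttbr(&vm, 0x1234);
          printf("stale after publish:  %d\n", need_new_vmid_gen_model(&vm));
          return 0;
  }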

Cc: stable@vger.kernel.org
Fixes: f0cf47d ("KVM: arm/arm64: Close VMID generation race")
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Christoffer Dall authored and Greg Kroah-Hartman committed Jan 16, 2019
1 parent 65dba32 commit cb754d6
Showing 1 changed file with 11 additions and 12 deletions.
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -61,7 +61,7 @@ static DEFINE_PER_CPU(struct kvm_vcpu *, kvm_arm_running_vcpu);
 static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static unsigned int kvm_vmid_bits __read_mostly;
-static DEFINE_RWLOCK(kvm_vmid_lock);
+static DEFINE_SPINLOCK(kvm_vmid_lock);
 
 static bool vgic_present;
 
@@ -447,7 +447,9 @@ void force_vm_exit(const cpumask_t *mask)
  */
 static bool need_new_vmid_gen(struct kvm *kvm)
 {
-	return unlikely(kvm->arch.vmid_gen != atomic64_read(&kvm_vmid_gen));
+	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
+	return unlikely(READ_ONCE(kvm->arch.vmid_gen) != current_vmid_gen);
 }
 
 /**
@@ -462,24 +464,19 @@ static void update_vttbr(struct kvm *kvm)
 {
 	phys_addr_t pgd_phys;
 	u64 vmid;
-	bool new_gen;
 
-	read_lock(&kvm_vmid_lock);
-	new_gen = need_new_vmid_gen(kvm);
-	read_unlock(&kvm_vmid_lock);
-
-	if (!new_gen)
+	if (!need_new_vmid_gen(kvm))
 		return;
 
-	write_lock(&kvm_vmid_lock);
+	spin_lock(&kvm_vmid_lock);
 
 	/*
	 * We need to re-check the vmid_gen here to ensure that if another vcpu
	 * already allocated a valid vmid for this vm, then this vcpu should
	 * use the same vmid.
	 */
 	if (!need_new_vmid_gen(kvm)) {
-		write_unlock(&kvm_vmid_lock);
+		spin_unlock(&kvm_vmid_lock);
 		return;
 	}
 
@@ -502,7 +499,6 @@ static void update_vttbr(struct kvm *kvm)
 		kvm_call_hyp(__kvm_flush_vm_context);
 	}
 
-	kvm->arch.vmid_gen = atomic64_read(&kvm_vmid_gen);
 	kvm->arch.vmid = kvm_next_vmid;
 	kvm_next_vmid++;
 	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
@@ -513,7 +509,10 @@ static void update_vttbr(struct kvm *kvm)
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = pgd_phys | vmid;
 
-	write_unlock(&kvm_vmid_lock);
+	smp_wmb();
+	WRITE_ONCE(kvm->arch.vmid_gen, atomic64_read(&kvm_vmid_gen));
+
+	spin_unlock(&kvm_vmid_lock);
 }
 
 static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
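
The same pairing can also be exercised concurrently. Below is a small,
self-contained stress harness (again illustrative, not kernel code; the
global and per-VM generation are collapsed into one variable for
brevity) that asserts the property the patch relies on: a reader that
observes generation g must also observe a payload published no earlier
than g. Build with something like cc -O2 -pthread:

  #include <assert.h>
  #include <pthread.h>
  #include <stdatomic.h>
  #include <stdint.h>
  #include <stdio.h>

  #define ITERS 1000000ULL

  static _Atomic uint64_t gen;     /* stands in for kvm->arch.vmid_gen */
  static _Atomic uint64_t vttbr;   /* stands in for kvm->arch.vttbr    */

  static void *writer(void *arg)
  {
          (void)arg;
          for (uint64_t g = 1; g <= ITERS; g++) {
                  /* Publish the payload first... */
                  atomic_store_explicit(&vttbr, g, memory_order_relaxed);
                  /* ...then the generation (smp_wmb() analogue). */
                  atomic_thread_fence(memory_order_release);
                  atomic_store_explicit(&gen, g, memory_order_relaxed);
          }
          return NULL;
  }

  static void *reader(void *arg)
  {
          (void)arg;
          while (atomic_load_explicit(&gen, memory_order_relaxed) < ITERS) {
                  uint64_t g = atomic_load_explicit(&gen,
                                                    memory_order_relaxed);
                  /* smp_rmb() analogue: order the two loads. */
                  atomic_thread_fence(memory_order_acquire);
                  uint64_t v = atomic_load_explicit(&vttbr,
                                                    memory_order_relaxed);
                  /* Seeing generation g guarantees a payload >= g. */
                  assert(v >= g);
          }
          return NULL;
  }

  int main(void)
  {
          pthread_t w, r;

          pthread_create(&r, NULL, reader, NULL);
          pthread_create(&w, NULL, writer, NULL);
          pthread_join(w, NULL);
          pthread_join(r, NULL);
          puts("ordering held for all observed pairs");
          return 0;
  }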
