Merge tag 'kvm-ppc-fixes-4.15-3' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into kvm-master

PPC KVM fixes for 4.15

Four commits here, including two that were tagged but never merged.
Three of them are for the HPT resizing code; two of those fix a
user-triggerable use-after-free in the host, and one fixes
stale TLB entries in the guest.  The remaining commit fixes a bug
causing PR KVM guests under PowerVM to fail to start.
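For context, the HPT resizing code in question is driven from userspace through the two-phase KVM_PPC_RESIZE_HPT_PREPARE / KVM_PPC_RESIZE_HPT_COMMIT ioctls, and the use-after-free could be triggered by repeating or abandoning that sequence while the allocation worker was still running. The sketch below shows roughly how a VMM drives the interface on a powerpc64 host; it illustrates the calling pattern only, it is not code from this commit, and the polling policy is invented.

/*
 * Illustration only: KVM_PPC_RESIZE_HPT_PREPARE returns a positive
 * estimate in milliseconds while the new HPT is still being allocated
 * (the -EBUSY state tracked in resize->error below), 0 once it is
 * ready, and a negative errno on failure; KVM_PPC_RESIZE_HPT_COMMIT
 * then rehashes and pivots the guest onto the new table.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdio.h>

static int resize_guest_hpt(int vm_fd, unsigned int shift)
{
	struct kvm_ppc_resize_hpt rhpt = { .flags = 0, .shift = shift };
	int ret;

	/* Phase 1: ask KVM to allocate a new HPT of the requested order. */
	do {
		ret = ioctl(vm_fd, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
		if (ret > 0)			/* still allocating */
			usleep(ret * 1000);	/* ret is an estimate in ms */
	} while (ret > 0);

	if (ret < 0) {
		perror("KVM_PPC_RESIZE_HPT_PREPARE");
		return ret;
	}

	/* Phase 2: move the guest over to the new HPT. */
	ret = ioctl(vm_fd, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
	if (ret < 0)
		perror("KVM_PPC_RESIZE_HPT_COMMIT");

	return ret;
}

The fixes in book3s_64_mmu_hv.c below are about what happens when this sequence is interrupted or repeated: a second PREPARE with a different shift, or a COMMIT racing the worker, could previously free the kvm_resize_hpt structure while resize_hpt_prepare_work() was still using it.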
Paolo Bonzini committed Jan 11, 2018
2 parents 2a266f2 + ecba829 commit 0217690
Showing 3 changed files with 64 additions and 29 deletions.
1 change: 1 addition & 0 deletions arch/powerpc/kvm/book3s_64_mmu.c
@@ -235,6 +235,7 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
gpte->may_read = true;
gpte->may_write = true;
gpte->page_size = MMU_PAGE_4K;
gpte->wimg = HPTE_R_M;

return 0;
}
90 changes: 61 additions & 29 deletions arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -65,11 +65,17 @@ struct kvm_resize_hpt {
u32 order;

/* These fields protected by kvm->lock */

/* Possible values and their usage:
 * <0 an error occurred during allocation,
 * -EBUSY allocation is in progress,
 * 0 allocation made successfully.
 */
int error;
bool prepare_done;

/* Private to the work thread, until prepare_done is true,
* then protected by kvm->resize_hpt_sem */
/* Private to the work thread, until error != -EBUSY,
* then protected by kvm->lock.
*/
struct kvm_hpt_info hpt;
};

@@ -159,8 +165,6 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
* Reset all the reverse-mapping chains for all memslots
*/
kvmppc_rmap_reset(kvm);
/* Ensure that each vcpu will flush its TLB on next entry. */
cpumask_setall(&kvm->arch.need_tlb_flush);
err = 0;
goto out;
}
@@ -176,6 +180,10 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
kvmppc_set_hpt(kvm, &info);

out:
if (err == 0)
/* Ensure that each vcpu will flush its TLB on next entry. */
cpumask_setall(&kvm->arch.need_tlb_flush);

mutex_unlock(&kvm->lock);
return err;
}
@@ -1413,16 +1421,20 @@ static void resize_hpt_pivot(struct kvm_resize_hpt *resize)

static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize)
{
BUG_ON(kvm->arch.resize_hpt != resize);
if (WARN_ON(!mutex_is_locked(&kvm->lock)))
return;

if (!resize)
return;

if (resize->hpt.virt)
kvmppc_free_hpt(&resize->hpt);
if (resize->error != -EBUSY) {
if (resize->hpt.virt)
kvmppc_free_hpt(&resize->hpt);
kfree(resize);
}

kvm->arch.resize_hpt = NULL;
kfree(resize);
if (kvm->arch.resize_hpt == resize)
kvm->arch.resize_hpt = NULL;
}

static void resize_hpt_prepare_work(struct work_struct *work)
@@ -1431,17 +1443,41 @@ static void resize_hpt_prepare_work(struct work_struct *work)
struct kvm_resize_hpt,
work);
struct kvm *kvm = resize->kvm;
int err;
int err = 0;

resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
resize->order);

err = resize_hpt_allocate(resize);
if (WARN_ON(resize->error != -EBUSY))
return;

mutex_lock(&kvm->lock);

/* Request is still current? */
if (kvm->arch.resize_hpt == resize) {
/* We may request large allocations here:
* do not sleep with kvm->lock held for a while.
*/
mutex_unlock(&kvm->lock);

resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
resize->order);

err = resize_hpt_allocate(resize);

/* We have a strict assumption about -EBUSY
* when preparing for HPT resize.
*/
if (WARN_ON(err == -EBUSY))
err = -EINPROGRESS;

mutex_lock(&kvm->lock);
/* It is possible that kvm->arch.resize_hpt != resize
* after we grab kvm->lock again.
*/
}

resize->error = err;
resize->prepare_done = true;

if (kvm->arch.resize_hpt != resize)
resize_hpt_release(kvm, resize);

mutex_unlock(&kvm->lock);
}
@@ -1466,14 +1502,12 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,

if (resize) {
if (resize->order == shift) {
/* Suitable resize in progress */
if (resize->prepare_done) {
ret = resize->error;
if (ret != 0)
resize_hpt_release(kvm, resize);
} else {
/* Suitable resize in progress? */
ret = resize->error;
if (ret == -EBUSY)
ret = 100; /* estimated time in ms */
}
else if (ret)
resize_hpt_release(kvm, resize);

goto out;
}
@@ -1493,6 +1527,8 @@ long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
ret = -ENOMEM;
goto out;
}

resize->error = -EBUSY;
resize->order = shift;
resize->kvm = kvm;
INIT_WORK(&resize->work, resize_hpt_prepare_work);
@@ -1547,16 +1583,12 @@ long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
if (!resize || (resize->order != shift))
goto out;

ret = -EBUSY;
if (!resize->prepare_done)
goto out;

ret = resize->error;
if (ret != 0)
if (ret)
goto out;

ret = resize_hpt_rehash(resize);
if (ret != 0)
if (ret)
goto out;

resize_hpt_pivot(resize);
2 changes: 2 additions & 0 deletions arch/powerpc/kvm/book3s_pr.c
@@ -60,6 +60,7 @@ static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M _PAGE_COHERENT
#endif

static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
@@ -557,6 +558,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
pte.eaddr = eaddr;
pte.vpage = eaddr >> 12;
pte.page_size = MMU_PAGE_64K;
pte.wimg = HPTE_R_M;
}

switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
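The one-line wimg additions are what get PR guests running again under PowerVM: struct kvmppc_pte carries the guest's WIMG storage-attribute bits, and the paths patched above (the fallback translations in kvmppc_mmu_book3s_64_xlate() and kvmppc_handle_pagefault()) previously left that field uninitialized, so the shadow HPTE built from it could describe non-coherent storage, which the hypervisor underneath may refuse. Below is a hypothetical helper, not code from this commit, showing the kind of host-side use the HPTE_R_M default is meant to satisfy; the function name and the local mask are illustrative only.

#include <asm/kvm_host.h>		/* struct kvmppc_pte, assumed kernel context */
#include <asm/book3s/64/mmu-hash.h>	/* HPTE_R_* storage attribute bits */

/*
 * Hypothetical helper: fold the guest's WIMG bits from a translated
 * kvmppc_pte into the second doubleword of the shadow HPTE the host is
 * about to install.  If gpte->wimg were left at zero, the resulting HPTE
 * would have the M (memory coherence required) bit clear for ordinary RAM;
 * defaulting wimg to HPTE_R_M in the hunks above avoids that.
 */
static unsigned long fold_guest_wimg(unsigned long hpte_r,
				     const struct kvmppc_pte *gpte)
{
	const unsigned long wimg = HPTE_R_W | HPTE_R_I | HPTE_R_M | HPTE_R_G;

	return (hpte_r & ~wimg) | (gpte->wimg & wimg);
}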
