Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
Pull more KVM updates from Paolo Bonzini:

 - PPC secure guest support

 - small x86 cleanup

 - fix for an x86-specific out-of-bounds write on an ioctl (not guest
   triggerable, data not attacker-controlled)

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  kvm: vmx: Stop wasting a page for guest_msrs
  KVM: x86: fix out-of-bounds write in KVM_GET_EMULATED_CPUID (CVE-2019-19332)
  Documentation: kvm: Fix mention to number of ioctls classes
  powerpc: Ultravisor: Add PPC_UV config option
  KVM: PPC: Book3S HV: Support reset of secure guest
  KVM: PPC: Book3S HV: Handle memory plug/unplug to secure VM
  KVM: PPC: Book3S HV: Radix changes for secure guest
  KVM: PPC: Book3S HV: Shared pages support for secure guests
  KVM: PPC: Book3S HV: Support for running secure guests
  mm: ksm: Export ksm_madvise()
  KVM x86: Move kvm cpuid support out of svm
Linus Torvalds committed Dec 4, 2019
2 parents 6cdc7f2 + 7d73710 commit aedc065
Showing 19 changed files with 1,156 additions and 20 deletions.
20 changes: 19 additions & 1 deletion Documentation/virt/kvm/api.txt
@@ -5,7 +5,7 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
----------------------

The kvm API is a set of ioctls that are issued to control various aspects
of a virtual machine. The ioctls belong to three classes:
of a virtual machine. The ioctls belong to the following classes:

- System ioctls: These query and set global attributes which affect the
whole kvm subsystem. In addition a system ioctl is used to create
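
As an illustration (not part of this patch): the system, VM and vcpu ioctl
classes are issued on three different file descriptors. A minimal userspace
sketch, using only long-standing KVM uAPI calls (KVM_GET_API_VERSION,
KVM_CREATE_VM, KVM_CREATE_VCPU) and omitting most error handling:

    /* sketch: which file descriptor each ioctl class is issued on */
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    int main(void)
    {
        int sys_fd, vm_fd, vcpu_fd;

        sys_fd = open("/dev/kvm", O_RDWR);          /* system ioctls go here */
        printf("KVM API version: %d\n", ioctl(sys_fd, KVM_GET_API_VERSION, 0));

        vm_fd = ioctl(sys_fd, KVM_CREATE_VM, 0);    /* system ioctl, returns a VM fd */
        vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0); /* VM ioctl, returns a vcpu fd */

        /* vcpu ioctls (KVM_RUN, KVM_GET_REGS, ...) are issued on vcpu_fd */
        return (sys_fd < 0 || vm_fd < 0 || vcpu_fd < 0) ? 1 : 0;
    }
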
@@ -4149,6 +4149,24 @@ Valid values for 'action':
#define KVM_PMU_EVENT_ALLOW 0
#define KVM_PMU_EVENT_DENY 1

4.121 KVM_PPC_SVM_OFF

Capability: basic
Architectures: powerpc
Type: vm ioctl
Parameters: none
Returns: 0 on successful completion
Errors:
  EINVAL: if the ultravisor failed to terminate the secure guest
  ENOMEM: if the hypervisor failed to allocate new radix page tables for the guest

This ioctl is used to turn off the secure mode of the guest, i.e. to
transition the guest from secure mode to normal mode. It is invoked when
the guest is reset, and it has no effect if called for a normal guest.

This ioctl issues an ultravisor call to terminate the secure guest,
unpins the VPA pages, and releases all the device pages that the
hypervisor uses to track the secure pages.
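
As an illustration (not part of this patch), a userspace VMM's guest-reset
path might invoke the ioctl roughly as sketched below. This assumes vm_fd is
a KVM VM file descriptor (obtained as in the earlier sketch) and that
<linux/kvm.h> comes from a tree containing this series, since KVM_PPC_SVM_OFF
is introduced by it; per the description above, the call has no effect for a
normal guest.

    /* sketch: reset path of a hypothetical VMM on a powerpc host */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>   /* must define KVM_PPC_SVM_OFF */

    static int svm_off_on_reset(int vm_fd)
    {
        if (ioctl(vm_fd, KVM_PPC_SVM_OFF, 0) < 0) {
            int err = errno; /* save before any library call can clobber it */

            if (err == EINVAL)
                fprintf(stderr, "ultravisor could not terminate the secure guest\n");
            else if (err == ENOMEM)
                fprintf(stderr, "no memory for new radix page tables\n");
            return -err;
        }
        return 0; /* guest is back in normal mode, or was never secure */
    }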

5. The kvm_run structure
------------------------
17 changes: 17 additions & 0 deletions arch/powerpc/Kconfig
@@ -452,6 +452,23 @@ config PPC_TRANSACTIONAL_MEM
help
Support user-mode Transactional Memory on POWERPC.

config PPC_UV
bool "Ultravisor support"
depends on KVM_BOOK3S_HV_POSSIBLE
select ZONE_DEVICE
select DEV_PAGEMAP_OPS
select DEVICE_PRIVATE
select MEMORY_HOTPLUG
select MEMORY_HOTREMOVE
default n
help
This option paravirtualizes the kernel to run on POWER platforms that
support the Protected Execution Facility (PEF). On such platforms,
the ultravisor firmware runs at a privilege level above the
hypervisor.

If unsure, say "N".
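
As a reference (not part of this diff), a resulting .config fragment on a
PPC64 host with HV KVM enabled might look like the lines below;
CONFIG_KVM_BOOK3S_64_HV is only one way to get KVM_BOOK3S_HV_POSSIBLE, and
the remaining symbols follow from the 'select' lines above:

    # assumed baseline: a PPC64 config with HV KVM enabled
    CONFIG_KVM_BOOK3S_64_HV=y     # satisfies 'depends on KVM_BOOK3S_HV_POSSIBLE'
    CONFIG_PPC_UV=y
    # pulled in by the 'select' statements of PPC_UV:
    CONFIG_ZONE_DEVICE=y
    CONFIG_DEV_PAGEMAP_OPS=y
    CONFIG_DEVICE_PRIVATE=y
    CONFIG_MEMORY_HOTPLUG=y
    CONFIG_MEMORY_HOTREMOVE=y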

config LD_HEAD_STUB_CATCH
bool "Reserve 256 bytes to cope with linker stubs in HEAD text" if EXPERT
depends on PPC64
9 changes: 9 additions & 0 deletions arch/powerpc/include/asm/hvcall.h
@@ -342,6 +342,15 @@
#define H_TLB_INVALIDATE 0xF808
#define H_COPY_TOFROM_GUEST 0xF80C

/* Flags for H_SVM_PAGE_IN */
#define H_PAGE_IN_SHARED 0x1

/* Platform-specific hcalls used by the Ultravisor */
#define H_SVM_PAGE_IN 0xEF00
#define H_SVM_PAGE_OUT 0xEF04
#define H_SVM_INIT_START 0xEF08
#define H_SVM_INIT_DONE 0xEF0C

/* Values for 2nd argument to H_SET_MODE */
#define H_SET_MODE_RESOURCE_SET_CIABR 1
#define H_SET_MODE_RESOURCE_SET_DAWR 2
74 changes: 74 additions & 0 deletions arch/powerpc/include/asm/kvm_book3s_uvmem.h
@@ -0,0 +1,74 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KVM_BOOK3S_UVMEM_H__
#define __ASM_KVM_BOOK3S_UVMEM_H__

#ifdef CONFIG_PPC_UV
int kvmppc_uvmem_init(void);
void kvmppc_uvmem_free(void);
int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot);
void kvmppc_uvmem_slot_free(struct kvm *kvm,
const struct kvm_memory_slot *slot);
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm,
unsigned long gra,
unsigned long flags,
unsigned long page_shift);
unsigned long kvmppc_h_svm_page_out(struct kvm *kvm,
unsigned long gra,
unsigned long flags,
unsigned long page_shift);
unsigned long kvmppc_h_svm_init_start(struct kvm *kvm);
unsigned long kvmppc_h_svm_init_done(struct kvm *kvm);
int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn);
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
struct kvm *kvm);
#else
static inline int kvmppc_uvmem_init(void)
{
return 0;
}

static inline void kvmppc_uvmem_free(void) { }

static inline int
kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
return 0;
}

static inline void
kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot) { }

static inline unsigned long
kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gra,
unsigned long flags, unsigned long page_shift)
{
return H_UNSUPPORTED;
}

static inline unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gra,
unsigned long flags, unsigned long page_shift)
{
return H_UNSUPPORTED;
}

static inline unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
return H_UNSUPPORTED;
}

static inline unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
return H_UNSUPPORTED;
}

static inline int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
return -EFAULT;
}

static inline void
kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *free,
struct kvm *kvm) { }
#endif /* CONFIG_PPC_UV */
#endif /* __ASM_KVM_BOOK3S_UVMEM_H__ */
6 changes: 6 additions & 0 deletions arch/powerpc/include/asm/kvm_host.h
@@ -275,6 +275,10 @@ struct kvm_hpt_info {

struct kvm_resize_hpt;

/* Flag values for kvm_arch.secure_guest */
#define KVMPPC_SECURE_INIT_START 0x1 /* H_SVM_INIT_START has been called */
#define KVMPPC_SECURE_INIT_DONE 0x2 /* H_SVM_INIT_DONE completed */

struct kvm_arch {
unsigned int lpid;
unsigned int smt_mode; /* # vcpus per virtual core */
@@ -330,6 +334,8 @@ struct kvm_arch {
#endif
struct kvmppc_ops *kvm_ops;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
struct mutex uvmem_lock;
struct list_head uvmem_pfns;
struct mutex mmu_setup_lock; /* nests inside vcpu mutexes */
u64 l1_ptcr;
int max_nested_lpid;
1 change: 1 addition & 0 deletions arch/powerpc/include/asm/kvm_ppc.h
@@ -322,6 +322,7 @@ struct kvmppc_ops {
int size);
int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
int size);
int (*svm_off)(struct kvm *kvm);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
6 changes: 6 additions & 0 deletions arch/powerpc/include/asm/ultravisor-api.h
@@ -26,8 +26,14 @@
#define UV_WRITE_PATE 0xF104
#define UV_RETURN 0xF11C
#define UV_ESM 0xF110
#define UV_REGISTER_MEM_SLOT 0xF120
#define UV_UNREGISTER_MEM_SLOT 0xF124
#define UV_PAGE_IN 0xF128
#define UV_PAGE_OUT 0xF12C
#define UV_SHARE_PAGE 0xF130
#define UV_UNSHARE_PAGE 0xF134
#define UV_UNSHARE_ALL_PAGES 0xF140
#define UV_PAGE_INVAL 0xF138
#define UV_SVM_TERMINATE 0xF13C

#endif /* _ASM_POWERPC_ULTRAVISOR_API_H */
36 changes: 36 additions & 0 deletions arch/powerpc/include/asm/ultravisor.h
@@ -46,4 +46,40 @@ static inline int uv_unshare_all_pages(void)
return ucall_norets(UV_UNSHARE_ALL_PAGES);
}

static inline int uv_page_in(u64 lpid, u64 src_ra, u64 dst_gpa, u64 flags,
u64 page_shift)
{
return ucall_norets(UV_PAGE_IN, lpid, src_ra, dst_gpa, flags,
page_shift);
}

static inline int uv_page_out(u64 lpid, u64 dst_ra, u64 src_gpa, u64 flags,
u64 page_shift)
{
return ucall_norets(UV_PAGE_OUT, lpid, dst_ra, src_gpa, flags,
page_shift);
}

static inline int uv_register_mem_slot(u64 lpid, u64 start_gpa, u64 size,
u64 flags, u64 slotid)
{
return ucall_norets(UV_REGISTER_MEM_SLOT, lpid, start_gpa,
size, flags, slotid);
}

static inline int uv_unregister_mem_slot(u64 lpid, u64 slotid)
{
return ucall_norets(UV_UNREGISTER_MEM_SLOT, lpid, slotid);
}

static inline int uv_page_inval(u64 lpid, u64 gpa, u64 page_shift)
{
return ucall_norets(UV_PAGE_INVAL, lpid, gpa, page_shift);
}

static inline int uv_svm_terminate(u64 lpid)
{
return ucall_norets(UV_SVM_TERMINATE, lpid);
}

#endif /* _ASM_POWERPC_ULTRAVISOR_H */
3 changes: 3 additions & 0 deletions arch/powerpc/kvm/Makefile
@@ -71,6 +71,9 @@ kvm-hv-y += \
book3s_64_mmu_radix.o \
book3s_hv_nested.o

kvm-hv-$(CONFIG_PPC_UV) += \
book3s_hv_uvmem.o

kvm-hv-$(CONFIG_PPC_TRANSACTIONAL_MEM) += \
book3s_hv_tm.o

25 changes: 25 additions & 0 deletions arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -19,6 +19,8 @@
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/ultravisor.h>
#include <asm/kvm_book3s_uvmem.h>

/*
* Supported radix tree geometry.
@@ -915,6 +917,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
if (!(dsisr & DSISR_PRTABLE_FAULT))
gpa |= ea & 0xfff;

if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return kvmppc_send_page_to_uv(kvm, gfn);

/* Get the corresponding memslot */
memslot = gfn_to_memslot(kvm, gfn);

@@ -972,6 +977,11 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;

if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
return 0;
}

ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep))
kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
@@ -989,6 +999,9 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
int ref = 0;
unsigned long old, *rmapp;

if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ref;

ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
@@ -1013,6 +1026,9 @@ int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned int shift;
int ref = 0;

if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ref;

ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep))
ref = 1;
@@ -1030,6 +1046,9 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
int ret = 0;
unsigned long old, *rmapp;

if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return ret;

ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
@@ -1082,6 +1101,12 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
unsigned long gpa;
unsigned int shift;

if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)
kvmppc_uvmem_drop_pages(memslot, kvm);

if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
return;

gpa = memslot->base_gfn << PAGE_SHIFT;
spin_lock(&kvm->mmu_lock);
for (n = memslot->npages; n; --n) {
(diffs for the remaining 9 changed files not shown)
