Merge branch 'kvm-updates/3.3' of git://git.kernel.org/pub/scm/virt/kvm/kvm

* 'kvm-updates/3.3' of git://git.kernel.org/pub/scm/virt/kvm/kvm: (74 commits)
  KVM: PPC: Whitespace fix for kvm.h
  KVM: Fix whitespace in kvm_para.h
  KVM: PPC: annotate kvm_rma_init as __init
  KVM: x86 emulator: implement RDPMC (0F 33)
  KVM: x86 emulator: fix RDPMC privilege check
  KVM: Expose the architectural performance monitoring CPUID leaf
  KVM: VMX: Intercept RDPMC
  KVM: SVM: Intercept RDPMC
  KVM: Add generic RDPMC support
  KVM: Expose a version 2 architectural PMU to a guests
  KVM: Expose kvm_lapic_local_deliver()
  KVM: x86 emulator: Use opcode::execute for Group 9 instruction
  KVM: x86 emulator: Use opcode::execute for Group 4/5 instructions
  KVM: x86 emulator: Use opcode::execute for Group 1A instruction
  KVM: ensure that debugfs entries have been created
  KVM: drop bsp_vcpu pointer from kvm struct
  KVM: x86: Consolidate PIT legacy test
  KVM: x86: Do not rely on implicit inclusions
  KVM: Make KVM_INTEL depend on CPU_SUP_INTEL
  KVM: Use memdup_user instead of kmalloc/copy_from_user
  ...
Linus Torvalds committed Jan 10, 2012
2 parents e4e1118 + da69dee commit 3dcf6c1
Showing 37 changed files with 2,372 additions and 1,752 deletions.
9 changes: 0 additions & 9 deletions Documentation/feature-removal-schedule.txt
@@ -350,15 +350,6 @@ Who: anybody or Florian Mickler <florian@mickler.org>

----------------------------

What: KVM paravirt mmu host support
When: January 2011
Why: The paravirt mmu host support is slower than non-paravirt mmu, both
on newer and older hardware. It is already not exposed to the guest,
and kept only for live migration purposes.
Who: Avi Kivity <avi@redhat.com>

----------------------------

What: iwlwifi 50XX module parameters
When: 3.0
Why: The "..50" modules parameters were used to configure 5000 series and
3 changes: 0 additions & 3 deletions Documentation/kernel-parameters.txt
@@ -1178,9 +1178,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
kvm.ignore_msrs= [KVM] Ignore guest accesses to unhandled MSRs.
Default is 0 (don't ignore, but inject #GP)

kvm.oos_shadow= [KVM] Disable out-of-sync shadow paging.
Default is 1 (enabled)

kvm.mmu_audit= [KVM] This is a R/W parameter which allows audit
KVM MMU at runtime.
Default is 0 (off)
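For illustration only, and not part of the diff: with kvm.oos_shadow removed, a host kernel command line can still tune the two parameters documented above. The values here are arbitrary examples.

    kvm.ignore_msrs=1 kvm.mmu_audit=1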
25 changes: 25 additions & 0 deletions Documentation/virtual/kvm/api.txt
@@ -1466,6 +1466,31 @@ is supported; 2 if the processor requires all virtual machines to have
an RMA, or 1 if the processor can use an RMA but doesn't require it,
because it supports the Virtual RMA (VRMA) facility.

4.64 KVM_NMI

Capability: KVM_CAP_USER_NMI
Architectures: x86
Type: vcpu ioctl
Parameters: none
Returns: 0 on success, -1 on error

Queues an NMI on the thread's vcpu. Note this is well defined only
when KVM_CREATE_IRQCHIP has not been called, since this is an interface
between the virtual cpu core and virtual local APIC. After KVM_CREATE_IRQCHIP
has been called, this interface is completely emulated within the kernel.

To use this to emulate the LINT1 input with KVM_CREATE_IRQCHIP, use the
following algorithm:

- pause the vcpu
- read the local APIC's state (KVM_GET_LAPIC)
- check whether changing LINT1 will queue an NMI (see the LVT entry for LINT1)
- if so, issue KVM_NMI
- resume the vcpu

Some guests configure the LINT1 NMI input to cause a panic, aiding in
debugging.

5. The kvm_run structure

Application code obtains a pointer to the kvm_run structure by
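The KVM_NMI documentation added above describes an algorithm rather than code. The userspace sketch below, which is not part of the commit, shows one way that algorithm could look. It assumes the caller has already paused the vcpu thread; the helper name is made up for the example, and the LINT1 offset and field layout follow the Intel SDM.

/*
 * Illustrative only: emulate the LINT1 NMI input per the KVM_NMI doc above.
 * The caller is assumed to have paused the vcpu thread already.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <string.h>

#define APIC_LVT_LINT1   0x360          /* LVT LINT1 register offset */
#define APIC_LVT_MASKED  (1u << 16)     /* bit 16: entry masked */
#define APIC_MODE_NMI    (4u << 8)      /* delivery mode field (bits 10:8) = NMI */

static int inject_lint1_nmi(int vcpu_fd)
{
	struct kvm_lapic_state lapic;
	uint32_t lvt;

	/* read the local APIC's state (KVM_GET_LAPIC) */
	if (ioctl(vcpu_fd, KVM_GET_LAPIC, &lapic) < 0)
		return -1;

	memcpy(&lvt, lapic.regs + APIC_LVT_LINT1, sizeof(lvt));

	/* check whether changing LINT1 will queue an NMI; if so, issue KVM_NMI */
	if ((lvt & (7u << 8)) == APIC_MODE_NMI && !(lvt & APIC_LVT_MASKED))
		return ioctl(vcpu_fd, KVM_NMI);

	return 0;
}

Resuming the vcpu afterwards is left to the caller, matching the last step of the documented algorithm.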
12 changes: 5 additions & 7 deletions arch/ia64/kvm/kvm-ia64.c
@@ -774,13 +774,13 @@ struct kvm *kvm_arch_alloc_vm(void)
return kvm;
}

struct kvm_io_range {
struct kvm_ia64_io_range {
unsigned long start;
unsigned long size;
unsigned long type;
};

static const struct kvm_io_range io_ranges[] = {
static const struct kvm_ia64_io_range io_ranges[] = {
{VGA_IO_START, VGA_IO_SIZE, GPFN_FRAME_BUFFER},
{MMIO_START, MMIO_SIZE, GPFN_LOW_MMIO},
{LEGACY_IO_START, LEGACY_IO_SIZE, GPFN_LEGACY_IO},
@@ -1366,14 +1366,12 @@ static void kvm_release_vm_pages(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
int i, j;
int j;
unsigned long base_gfn;

slots = kvm_memslots(kvm);
for (i = 0; i < slots->nmemslots; i++) {
memslot = &slots->memslots[i];
kvm_for_each_memslot(memslot, slots) {
base_gfn = memslot->base_gfn;

for (j = 0; j < memslot->npages; j++) {
if (memslot->rmap[j])
put_page((struct page *)memslot->rmap[j]);
@@ -1820,7 +1818,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
if (log->slot >= KVM_MEMORY_SLOTS)
goto out;

memslot = &kvm->memslots->memslots[log->slot];
memslot = id_to_memslot(kvm->memslots, log->slot);
r = -ENOENT;
if (!memslot->dirty_bitmap)
goto out;
4 changes: 2 additions & 2 deletions arch/powerpc/include/asm/kvm.h
@@ -170,8 +170,8 @@ struct kvm_sregs {
} ppc64;
struct {
__u32 sr[16];
__u64 ibat[8];
__u64 dbat[8];
__u64 ibat[8];
__u64 dbat[8];
} ppc32;
} s;
struct {
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s.c
@@ -498,7 +498,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,

/* If nothing is dirty, don't bother messing with page tables. */
if (is_dirty) {
memslot = &kvm->memslots->memslots[log->slot];
memslot = id_to_memslot(kvm->memslots, log->slot);

ga = memslot->base_gfn << PAGE_SHIFT;
ga_end = ga + (memslot->npages << PAGE_SHIFT);
2 changes: 1 addition & 1 deletion arch/powerpc/kvm/book3s_hv_builtin.c
@@ -86,7 +86,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
* to allocate contiguous physical memory for the real memory
* areas for guests.
*/
void kvm_rma_init(void)
void __init kvm_rma_init(void)
{
unsigned long i;
unsigned long j, npages;
3 changes: 3 additions & 0 deletions arch/x86/include/asm/cpufeature.h
@@ -197,7 +197,10 @@

/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
#define X86_FEATURE_FSGSBASE (9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
#define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */

#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
2 changes: 2 additions & 0 deletions arch/x86/include/asm/kvm_emulate.h
@@ -181,6 +181,7 @@ struct x86_emulate_ops {
int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
int (*read_pmc)(struct x86_emulate_ctxt *ctxt, u32 pmc, u64 *pdata);
void (*halt)(struct x86_emulate_ctxt *ctxt);
void (*wbinvd)(struct x86_emulate_ctxt *ctxt);
int (*fix_hypercall)(struct x86_emulate_ctxt *ctxt);
@@ -364,6 +365,7 @@ enum x86_intercept {
#endif

int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
bool x86_page_table_writing_insn(struct x86_emulate_ctxt *ctxt);
#define EMULATION_FAILED -1
#define EMULATION_OK 0
#define EMULATION_RESTART 1
90 changes: 68 additions & 22 deletions arch/x86/include/asm/kvm_host.h
@@ -16,10 +16,12 @@
#include <linux/mmu_notifier.h>
#include <linux/tracepoint.h>
#include <linux/cpumask.h>
#include <linux/irq_work.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>
#include <linux/kvm_types.h>
#include <linux/perf_event.h>

#include <asm/pvclock-abi.h>
#include <asm/desc.h>
@@ -31,6 +33,8 @@
#define KVM_MEMORY_SLOTS 32
/* memory slots that does not exposed to userspace */
#define KVM_PRIVATE_MEM_SLOTS 4
#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)

#define KVM_MMIO_SIZE 16

#define KVM_PIO_PAGE_OFFSET 1
@@ -228,7 +232,7 @@ struct kvm_mmu_page {
* One bit set per slot which has memory
* in this shadow page.
*/
DECLARE_BITMAP(slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
DECLARE_BITMAP(slot_bitmap, KVM_MEM_SLOTS_NUM);
bool unsync;
int root_count; /* Currently serving as active root */
unsigned int unsync_children;
@@ -239,14 +243,9 @@ struct kvm_mmu_page {
int clear_spte_count;
#endif

struct rcu_head rcu;
};
int write_flooding_count;

struct kvm_pv_mmu_op_buffer {
void *ptr;
unsigned len;
unsigned processed;
char buf[512] __aligned(sizeof(long));
struct rcu_head rcu;
};

struct kvm_pio_request {
@@ -294,6 +293,37 @@ struct kvm_mmu {
u64 pdptrs[4]; /* pae */
};

enum pmc_type {
KVM_PMC_GP = 0,
KVM_PMC_FIXED,
};

struct kvm_pmc {
enum pmc_type type;
u8 idx;
u64 counter;
u64 eventsel;
struct perf_event *perf_event;
struct kvm_vcpu *vcpu;
};

struct kvm_pmu {
unsigned nr_arch_gp_counters;
unsigned nr_arch_fixed_counters;
unsigned available_event_types;
u64 fixed_ctr_ctrl;
u64 global_ctrl;
u64 global_status;
u64 global_ovf_ctrl;
u64 counter_bitmask[2];
u64 global_ctrl_mask;
u8 version;
struct kvm_pmc gp_counters[X86_PMC_MAX_GENERIC];
struct kvm_pmc fixed_counters[X86_PMC_MAX_FIXED];
struct irq_work irq_work;
u64 reprogram_pmi;
};

struct kvm_vcpu_arch {
/*
* rip and regs accesses must go through
@@ -345,19 +375,10 @@ struct kvm_vcpu_arch {
*/
struct kvm_mmu *walk_mmu;

/* only needed in kvm_pv_mmu_op() path, but it's hot so
* put it here to avoid allocation */
struct kvm_pv_mmu_op_buffer mmu_op_buffer;

struct kvm_mmu_memory_cache mmu_pte_list_desc_cache;
struct kvm_mmu_memory_cache mmu_page_cache;
struct kvm_mmu_memory_cache mmu_page_header_cache;

gfn_t last_pt_write_gfn;
int last_pt_write_count;
u64 *last_pte_updated;
gfn_t last_pte_gfn;

struct fpu guest_fpu;
u64 xcr0;

@@ -436,6 +457,8 @@ struct kvm_vcpu_arch {
unsigned access;
gfn_t mmio_gfn;

struct kvm_pmu pmu;

/* used for guest single stepping over the given code position */
unsigned long singlestep_rip;

@@ -444,6 +467,9 @@

cpumask_var_t wbinvd_dirty_mask;

unsigned long last_retry_eip;
unsigned long last_retry_addr;

struct {
bool halted;
gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -459,7 +485,6 @@ struct kvm_arch {
unsigned int n_requested_mmu_pages;
unsigned int n_max_mmu_pages;
unsigned int indirect_shadow_pages;
atomic_t invlpg_counter;
struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
/*
* Hash table of struct kvm_mmu_page.
@@ -660,6 +685,8 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn,
struct kvm_memory_slot *slot);
void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
@@ -668,8 +695,6 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3);

int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
const void *val, int bytes);
int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
gpa_t addr, unsigned long *ret);
u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);

extern bool tdp_enabled;
@@ -692,6 +717,7 @@ enum emulation_result {
#define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2)
#define EMULTYPE_RETRY (1 << 3)
int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
int emulation_type, void *insn, int insn_len);

@@ -734,6 +760,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data);

unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu);
void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
bool kvm_rdpmc(struct kvm_vcpu *vcpu);

void kvm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr);
void kvm_queue_exception_e(struct kvm_vcpu *vcpu, unsigned nr, u32 error_code);
@@ -754,13 +781,14 @@ int fx_init(struct kvm_vcpu *vcpu);

void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
const u8 *new, int bytes,
bool guest_initiated);
const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access);
gpa_t kvm_mmu_gva_to_gpa_read(struct kvm_vcpu *vcpu, gva_t gva,
struct x86_exception *exception);
gpa_t kvm_mmu_gva_to_gpa_fetch(struct kvm_vcpu *vcpu, gva_t gva,
Expand All @@ -782,6 +810,11 @@ void kvm_disable_tdp(void);
int complete_pio(struct kvm_vcpu *vcpu);
bool kvm_check_iopl(struct kvm_vcpu *vcpu);

static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access)
{
return gpa;
}

static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
{
struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
@@ -894,4 +927,17 @@ extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_complete_insn_gp(struct kvm_vcpu *vcpu, int err);

int kvm_is_in_guest(void);

void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu);
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data);
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu);
void kvm_deliver_pmi(struct kvm_vcpu *vcpu);

#endif /* _ASM_X86_KVM_HOST_H */
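The kvm_host.h hunks above add the per-vcpu PMU state (struct kvm_pmc, struct kvm_pmu) and the kvm_pmu_* entry points behind the RDPMC and architectural-PMU commits in the shortlog. As a guest-side illustration, not part of the commit, the sketch below reads the architectural performance monitoring CPUID leaf (0xA) that the series exposes; the field layout follows the Intel SDM, and the exact values a guest sees depend on the host CPU.

/* Illustrative only: inspect CPUID leaf 0xA from inside a guest. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0xA, &eax, &ebx, &ecx, &edx) || (eax & 0xff) == 0) {
		puts("no architectural PMU exposed");
		return 1;
	}

	printf("PMU version:         %u\n", eax & 0xff);        /* expected to be 2 with this series */
	printf("GP counters:         %u\n", (eax >> 8) & 0xff);
	printf("GP counter width:    %u bits\n", (eax >> 16) & 0xff);
	printf("fixed counters:      %u\n", edx & 0x1f);
	printf("fixed counter width: %u bits\n", (edx >> 5) & 0xff);
	return 0;
}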
