KVM guest: remove KVM guest pv mmu support
This has not been used for some years now.  It's time to remove it.

Signed-off-by: Chris Wright <chrisw@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Chris Wright authored and Avi Kivity committed Dec 27, 2011
1 parent 1a21424 commit 5202397
arch/x86/kernel/kvm.c: 181 changes (0 additions, 181 deletions)
@@ -39,8 +39,6 @@
#include <asm/desc.h>
#include <asm/tlbflush.h>

#define MMU_QUEUE_SIZE 1024

static int kvmapf = 1;

static int parse_no_kvmapf(char *arg)
@@ -60,21 +58,10 @@ static int parse_no_stealacc(char *arg)

early_param("no-steal-acc", parse_no_stealacc);

struct kvm_para_state {
        u8 mmu_queue[MMU_QUEUE_SIZE];
        int mmu_queue_len;
};

static DEFINE_PER_CPU(struct kvm_para_state, para_state);
static DEFINE_PER_CPU(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
static DEFINE_PER_CPU(struct kvm_steal_time, steal_time) __aligned(64);
static int has_steal_clock = 0;

static struct kvm_para_state *kvm_para_state(void)
{
        return &per_cpu(para_state, raw_smp_processor_id());
}

/*
* No need for any "IO delay" on KVM
*/
@@ -271,151 +258,6 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code)
        }
}

static void kvm_mmu_op(void *buffer, unsigned len)
{
        int r;
        unsigned long a1, a2;

        do {
                a1 = __pa(buffer);
                a2 = 0;   /* on i386 __pa() always returns <4G */
                r = kvm_hypercall3(KVM_HC_MMU_OP, len, a1, a2);
                buffer += r;
                len -= r;
        } while (len);
}

static void mmu_queue_flush(struct kvm_para_state *state)
{
        if (state->mmu_queue_len) {
                kvm_mmu_op(state->mmu_queue, state->mmu_queue_len);
                state->mmu_queue_len = 0;
        }
}

static void kvm_deferred_mmu_op(void *buffer, int len)
{
        struct kvm_para_state *state = kvm_para_state();

        if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) {
                kvm_mmu_op(buffer, len);
                return;
        }
        if (state->mmu_queue_len + len > sizeof state->mmu_queue)
                mmu_queue_flush(state);
        memcpy(state->mmu_queue + state->mmu_queue_len, buffer, len);
        state->mmu_queue_len += len;
}

static void kvm_mmu_write(void *dest, u64 val)
{
        __u64 pte_phys;
        struct kvm_mmu_op_write_pte wpte;

#ifdef CONFIG_HIGHPTE
        struct page *page;
        unsigned long dst = (unsigned long) dest;

        page = kmap_atomic_to_page(dest);
        pte_phys = page_to_pfn(page);
        pte_phys <<= PAGE_SHIFT;
        pte_phys += (dst & ~(PAGE_MASK));
#else
        pte_phys = (unsigned long)__pa(dest);
#endif
        wpte.header.op = KVM_MMU_OP_WRITE_PTE;
        wpte.pte_val = val;
        wpte.pte_phys = pte_phys;

        kvm_deferred_mmu_op(&wpte, sizeof wpte);
}

/*
 * We only need to hook operations that are MMU writes. We hook these so that
 * we can use lazy MMU mode to batch these operations. We could probably
 * improve the performance of the host code if we used some of the information
 * here to simplify processing of batched writes.
 */
static void kvm_set_pte(pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pte_at(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_set_pmd(pmd_t *pmdp, pmd_t pmd)
{
        kvm_mmu_write(pmdp, pmd_val(pmd));
}

#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
static void kvm_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        kvm_mmu_write(ptep, pte_val(pte));
}

static void kvm_pte_clear(struct mm_struct *mm,
                          unsigned long addr, pte_t *ptep)
{
        kvm_mmu_write(ptep, 0);
}

static void kvm_pmd_clear(pmd_t *pmdp)
{
        kvm_mmu_write(pmdp, 0);
}
#endif

static void kvm_set_pud(pud_t *pudp, pud_t pud)
{
        kvm_mmu_write(pudp, pud_val(pud));
}

#if PAGETABLE_LEVELS == 4
static void kvm_set_pgd(pgd_t *pgdp, pgd_t pgd)
{
        kvm_mmu_write(pgdp, pgd_val(pgd));
}
#endif
#endif /* PAGETABLE_LEVELS >= 3 */

static void kvm_flush_tlb(void)
{
        struct kvm_mmu_op_flush_tlb ftlb = {
                .header.op = KVM_MMU_OP_FLUSH_TLB,
        };

        kvm_deferred_mmu_op(&ftlb, sizeof ftlb);
}

static void kvm_release_pt(unsigned long pfn)
{
        struct kvm_mmu_op_release_pt rpt = {
                .header.op = KVM_MMU_OP_RELEASE_PT,
                .pt_phys = (u64)pfn << PAGE_SHIFT,
        };

        kvm_mmu_op(&rpt, sizeof rpt);
}

static void kvm_enter_lazy_mmu(void)
{
        paravirt_enter_lazy_mmu();
}

static void kvm_leave_lazy_mmu(void)
{
        struct kvm_para_state *state = kvm_para_state();

        mmu_queue_flush(state);
        paravirt_leave_lazy_mmu();
}

static void __init paravirt_ops_setup(void)
{
        pv_info.name = "KVM";
@@ -424,29 +266,6 @@ static void __init paravirt_ops_setup(void)
        if (kvm_para_has_feature(KVM_FEATURE_NOP_IO_DELAY))
                pv_cpu_ops.io_delay = kvm_io_delay;

        if (kvm_para_has_feature(KVM_FEATURE_MMU_OP)) {
                pv_mmu_ops.set_pte = kvm_set_pte;
                pv_mmu_ops.set_pte_at = kvm_set_pte_at;
                pv_mmu_ops.set_pmd = kvm_set_pmd;
#if PAGETABLE_LEVELS >= 3
#ifdef CONFIG_X86_PAE
                pv_mmu_ops.set_pte_atomic = kvm_set_pte_atomic;
                pv_mmu_ops.pte_clear = kvm_pte_clear;
                pv_mmu_ops.pmd_clear = kvm_pmd_clear;
#endif
                pv_mmu_ops.set_pud = kvm_set_pud;
#if PAGETABLE_LEVELS == 4
                pv_mmu_ops.set_pgd = kvm_set_pgd;
#endif
#endif
                pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
                pv_mmu_ops.release_pte = kvm_release_pt;
                pv_mmu_ops.release_pmd = kvm_release_pt;
                pv_mmu_ops.release_pud = kvm_release_pt;

                pv_mmu_ops.lazy_mode.enter = kvm_enter_lazy_mmu;
                pv_mmu_ops.lazy_mode.leave = kvm_leave_lazy_mmu;
        }
#ifdef CONFIG_X86_IO_APIC
        no_timer_check = 1;
#endif