Commit 0dae73c

---
r: 95975
b: refs/heads/master
c: 1024c5f
h: refs/heads/master
i:
  95973: 90956ed
  95971: b1c2b51
  95967: f350952
v: v3
Bartlomiej Zolnierkiewicz committed May 4, 2008
1 parent 909ec45 commit 0dae73c
Showing 20 changed files with 107 additions and 587 deletions.
2 changes: 1 addition & 1 deletion [refs]
@@ -1,2 +1,2 @@
 ---
-refs/heads/master: 8dcf5782848600ecfd0df3a45c521b5ad0fcb42e
+refs/heads/master: 1024c5f4be4fc5b00337464fb8a442bebf15df68
6 changes: 0 additions & 6 deletions trunk/arch/powerpc/kvm/booke_guest.c
@@ -49,7 +49,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
         { "inst_emu", VCPU_STAT(emulated_inst_exits) },
         { "dec", VCPU_STAT(dec_exits) },
         { "ext_intr", VCPU_STAT(ext_intr_exits) },
-        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
         { NULL }
 };

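A note on the stats table in the hunk above: each row pairs a debugfs file name with the byte offset of a counter inside the vcpu statistics structure, so one generic debugfs handler can serve every counter. A minimal standalone sketch of that pattern follows; the struct layout and the VCPU_STAT definition here are assumptions modeled on KVM's shared headers, not quoted from this file.

#include <stddef.h>

/* Hypothetical stand-ins for the real KVM types and macros. */
struct kvm_vcpu_stat {
        unsigned long dec_exits;
        unsigned long ext_intr_exits;
        unsigned long halt_wakeup;
};

struct kvm_stats_debugfs_item {
        const char *name;
        size_t offset;          /* counter's offset inside the stat struct */
};

#define VCPU_STAT(x) offsetof(struct kvm_vcpu_stat, x)

static struct kvm_stats_debugfs_item debugfs_entries_sketch[] = {
        { "dec",         VCPU_STAT(dec_exits) },
        { "ext_intr",    VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { NULL, 0 }
};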
@@ -339,11 +338,6 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                 }
                 break;
 
-        case BOOKE_INTERRUPT_FP_UNAVAIL:
-                kvmppc_queue_exception(vcpu, exit_nr);
-                r = RESUME_GUEST;
-                break;
-
         case BOOKE_INTERRUPT_DATA_STORAGE:
                 vcpu->arch.dear = vcpu->arch.fault_dear;
                 vcpu->arch.esr = vcpu->arch.fault_esr;
20 changes: 3 additions & 17 deletions trunk/arch/powerpc/kvm/powerpc.c
@@ -36,12 +36,13 @@ gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
 
 int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
 {
-        return !!(v->arch.pending_exceptions);
+        /* XXX implement me */
+        return 0;
 }
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
 {
-        return !(v->arch.msr & MSR_WE);
+        return 1;
 }


@@ -213,11 +214,6 @@ static void kvmppc_decrementer_func(unsigned long data)
         struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
 
         kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);
-
-        if (waitqueue_active(&vcpu->wq)) {
-                wake_up_interruptible(&vcpu->wq);
-                vcpu->stat.halt_wakeup++;
-        }
 }
 
 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
@@ -343,8 +339,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         int r;
         sigset_t sigsaved;
 
-        vcpu_load(vcpu);
-
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

@@ -369,20 +363,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
         if (vcpu->sigset_active)
                 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
 
-        vcpu_put(vcpu);
-
         return r;
 }
 
 int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
 {
         kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);
-
-        if (waitqueue_active(&vcpu->wq)) {
-                wake_up_interruptible(&vcpu->wq);
-                vcpu->stat.halt_wakeup++;
-        }
-
         return 0;
 }

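The waitqueue lines deleted in this file (here and in the decrementer hunk above) are the standard "kick a halted vcpu" idiom: a vcpu sleeping on vcpu->wq in its halt loop is woken whenever new work is queued, and a wakeup statistic is bumped. A sketch of the idiom as a helper, assuming the usual Linux waitqueue API; the helper name is illustrative, not part of this patch.

/* Illustrative helper: wake a vcpu that may be sleeping on its
 * wait queue after an interrupt has been queued for it. */
static void kvm_vcpu_kick_sleeper(struct kvm_vcpu *vcpu)
{
        if (waitqueue_active(&vcpu->wq)) {
                wake_up_interruptible(&vcpu->wq);
                vcpu->stat.halt_wakeup++;
        }
}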
4 changes: 0 additions & 4 deletions trunk/arch/x86/kernel/kvmclock.c
@@ -133,7 +133,6 @@ static int kvm_register_clock(void)
         return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC
 static void kvm_setup_secondary_clock(void)
 {
         /*
@@ -144,7 +143,6 @@ static void kvm_setup_secondary_clock(void)
         /* ok, done with our trickery, call native */
         setup_secondary_APIC_clock();
 }
-#endif
 
 /*
  * After the clock is registered, the host will keep writing to the
@@ -179,9 +177,7 @@ void __init kvmclock_init(void)
         pv_time_ops.get_wallclock = kvm_get_wallclock;
         pv_time_ops.set_wallclock = kvm_set_wallclock;
         pv_time_ops.sched_clock = kvm_clock_read;
-#ifdef CONFIG_X86_LOCAL_APIC
         pv_apic_ops.setup_secondary_clock = kvm_setup_secondary_clock;
-#endif
         machine_ops.shutdown = kvm_shutdown;
 #ifdef CONFIG_KEXEC
         machine_ops.crash_shutdown = kvm_crash_shutdown;
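For context on kvm_register_clock() in the first hunk: the guest publishes the guest-physical address of its per-cpu time structure to the host through a 64-bit MSR, split into the low/high 32-bit halves that native_write_msr_safe() takes. A rough sketch under two stated assumptions: hv_clock_pa is a hypothetical variable holding that physical address, and the low address bit doubles as an enable flag (as in later, documented kvmclock ABIs).

/* Sketch only: hand the host a guest-physical address via an MSR. */
static int register_clock_sketch(unsigned long long hv_clock_pa)
{
        unsigned long long pa = hv_clock_pa | 1;  /* bit 0: enable (assumed) */
        unsigned int low  = (unsigned int)pa;
        unsigned int high = (unsigned int)(pa >> 32);

        return native_write_msr_safe(MSR_KVM_SYSTEM_TIME, low, high);
}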
2 changes: 0 additions & 2 deletions trunk/arch/x86/kvm/i8254.c
@@ -288,8 +288,6 @@ static void pit_load_count(struct kvm *kvm, int channel, u32 val)
          * mode 1 is one shot, mode 2 is period, otherwise del timer */
         switch (ps->channels[0].mode) {
         case 1:
-        /* FIXME: enhance mode 4 precision */
-        case 4:
                 create_pit_timer(&ps->pit_timer, val, 0);
                 break;
         case 2:
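Background for the mode switch above: the PIT decrements a 16-bit reload value at roughly 1.193182 MHz, so modes 1 and 4 arm a one-shot expiry and mode 2 a periodic one, each with a period derived from the count. A standalone sketch of that conversion; the constant and helper names are ours, not the file's.

#include <stdint.h>

#define PIT_FREQ_HZ  1193182ULL         /* ~1.193182 MHz input clock */
#define NSEC_PER_SEC 1000000000ULL

/* Convert a PIT reload value into a timer period in nanoseconds. */
static uint64_t pit_count_to_ns(uint32_t count)
{
        if (count == 0)
                count = 0x10000;        /* a count of 0 means 65536 ticks */
        return count * NSEC_PER_SEC / PIT_FREQ_HZ;
}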
89 changes: 52 additions & 37 deletions trunk/arch/x86/kvm/mmu.c
@@ -79,6 +79,36 @@ static int dbg = 1;
 }
 #endif
 
+#define PT64_PT_BITS 9
+#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
+#define PT32_PT_BITS 10
+#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
+
+#define PT_WRITABLE_SHIFT 1
+
+#define PT_PRESENT_MASK (1ULL << 0)
+#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
+#define PT_USER_MASK (1ULL << 2)
+#define PT_PWT_MASK (1ULL << 3)
+#define PT_PCD_MASK (1ULL << 4)
+#define PT_ACCESSED_MASK (1ULL << 5)
+#define PT_DIRTY_MASK (1ULL << 6)
+#define PT_PAGE_SIZE_MASK (1ULL << 7)
+#define PT_PAT_MASK (1ULL << 7)
+#define PT_GLOBAL_MASK (1ULL << 8)
+#define PT64_NX_SHIFT 63
+#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
+
+#define PT_PAT_SHIFT 7
+#define PT_DIR_PAT_SHIFT 12
+#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
+
+#define PT32_DIR_PSE36_SIZE 4
+#define PT32_DIR_PSE36_SHIFT 13
+#define PT32_DIR_PSE36_MASK \
+        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
+
+
 #define PT_FIRST_AVAIL_BITS_SHIFT 9
 #define PT64_SECOND_AVAIL_BITS_SHIFT 52

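To make the restored masks concrete, here is a standalone fragment that decodes a 64-bit page-table entry with them; the sample value is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << 1)
#define PT_USER_MASK     (1ULL << 2)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK    (1ULL << 6)
#define PT64_NX_MASK     (1ULL << 63)

int main(void)
{
        /* present + writable + user + accessed + dirty + no-execute */
        uint64_t pte = 0x8000000000000067ULL;

        printf("present=%d writable=%d user=%d accessed=%d dirty=%d nx=%d\n",
               !!(pte & PT_PRESENT_MASK), !!(pte & PT_WRITABLE_MASK),
               !!(pte & PT_USER_MASK), !!(pte & PT_ACCESSED_MASK),
               !!(pte & PT_DIRTY_MASK), !!(pte & PT64_NX_MASK));
        return 0;
}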
@@ -124,6 +154,10 @@ static int dbg = 1;
 #define PFERR_USER_MASK (1U << 2)
 #define PFERR_FETCH_MASK (1U << 4)
 
+#define PT64_ROOT_LEVEL 4
+#define PT32_ROOT_LEVEL 2
+#define PT32E_ROOT_LEVEL 3
+
 #define PT_DIRECTORY_LEVEL 2
 #define PT_PAGE_TABLE_LEVEL 1
 
@@ -152,12 +186,6 @@ static struct kmem_cache *mmu_page_header_cache;
 
 static u64 __read_mostly shadow_trap_nonpresent_pte;
 static u64 __read_mostly shadow_notrap_nonpresent_pte;
-static u64 __read_mostly shadow_base_present_pte;
-static u64 __read_mostly shadow_nx_mask;
-static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
-static u64 __read_mostly shadow_user_mask;
-static u64 __read_mostly shadow_accessed_mask;
-static u64 __read_mostly shadow_dirty_mask;
 
 void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 {
@@ -166,23 +194,6 @@ void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
 
-void kvm_mmu_set_base_ptes(u64 base_pte)
-{
-        shadow_base_present_pte = base_pte;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
-
-void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
-                u64 dirty_mask, u64 nx_mask, u64 x_mask)
-{
-        shadow_user_mask = user_mask;
-        shadow_accessed_mask = accessed_mask;
-        shadow_dirty_mask = dirty_mask;
-        shadow_nx_mask = nx_mask;
-        shadow_x_mask = x_mask;
-}
-EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
-
 static int is_write_protection(struct kvm_vcpu *vcpu)
 {
         return vcpu->arch.cr0 & X86_CR0_WP;
@@ -221,7 +232,7 @@ static int is_writeble_pte(unsigned long pte)
 
 static int is_dirty_pte(unsigned long pte)
 {
-        return pte & shadow_dirty_mask;
+        return pte & PT_DIRTY_MASK;
 }
 
 static int is_rmap_pte(u64 pte)
@@ -376,6 +387,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 
         write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
         *write_count += 1;
+        WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -535,7 +547,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
                 return;
         sp = page_header(__pa(spte));
         pfn = spte_to_pfn(*spte);
-        if (*spte & shadow_accessed_mask)
+        if (*spte & PT_ACCESSED_MASK)
                 kvm_set_pfn_accessed(pfn);
         if (is_writeble_pte(*spte))
                 kvm_release_pfn_dirty(pfn);
@@ -1061,17 +1073,17 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
          * whether the guest actually used the pte (in order to detect
          * demand paging).
          */
-        spte = shadow_base_present_pte | shadow_dirty_mask;
+        spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
         if (!speculative)
                 pte_access |= PT_ACCESSED_MASK;
         if (!dirty)
                 pte_access &= ~ACC_WRITE_MASK;
-        if (pte_access & ACC_EXEC_MASK)
-                spte |= shadow_x_mask;
-        else
-                spte |= shadow_nx_mask;
+        if (!(pte_access & ACC_EXEC_MASK))
+                spte |= PT64_NX_MASK;
+
+        spte |= PT_PRESENT_MASK;
         if (pte_access & ACC_USER_MASK)
-                spte |= shadow_user_mask;
+                spte |= PT_USER_MASK;
         if (largepage)
                 spte |= PT_PAGE_SIZE_MASK;

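The shadow_x_mask/shadow_nx_mask pair dropped above (declared "mutual exclusive" earlier in this diff) exists because page-table formats express execute permission in opposite senses: legacy and AMD64 paging set a high bit to forbid execution, while EPT-style formats set a low bit to allow it. A sketch of the abstraction being reverted; the bit positions are example assumptions, and a real host would configure only one of the two masks.

#define SHADOW_NX_EXAMPLE (1ULL << 63)  /* legacy: bit set = not executable */
#define SHADOW_X_EXAMPLE  (1ULL << 2)   /* EPT-style: bit set = executable */

static unsigned long long apply_exec_perm(unsigned long long spte,
                                          int exec_allowed,
                                          int host_uses_x_bit)
{
        if (exec_allowed)
                return spte | (host_uses_x_bit ? SHADOW_X_EXAMPLE : 0);
        return spte | (host_uses_x_bit ? 0 : SHADOW_NX_EXAMPLE);
}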
@@ -1176,9 +1188,8 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
                         return -ENOMEM;
                 }
 
-                table[index] = __pa(new_table->spt)
-                        | PT_PRESENT_MASK | PT_WRITABLE_MASK
-                        | shadow_user_mask | shadow_x_mask;
+                table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
+                        | PT_WRITABLE_MASK | PT_USER_MASK;
         }
         table_addr = table[index] & PT64_BASE_ADDR_MASK;
 }
@@ -1233,6 +1244,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
         if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
                 return;
         spin_lock(&vcpu->kvm->mmu_lock);
+#ifdef CONFIG_X86_64
         if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                 hpa_t root = vcpu->arch.mmu.root_hpa;
 
@@ -1244,6 +1256,7 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
                 spin_unlock(&vcpu->kvm->mmu_lock);
                 return;
         }
+#endif
         for (i = 0; i < 4; ++i) {
                 hpa_t root = vcpu->arch.mmu.pae_root[i];
 
Expand All @@ -1269,6 +1282,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)

root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
hpa_t root = vcpu->arch.mmu.root_hpa;

@@ -1283,6 +1297,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
                 vcpu->arch.mmu.root_hpa = root;
                 return;
         }
+#endif
         metaphysical = !is_paging(vcpu);
         if (tdp_enabled)
                 metaphysical = 1;
@@ -1362,7 +1377,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
         spin_lock(&vcpu->kvm->mmu_lock);
         kvm_mmu_free_some_pages(vcpu);
         r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
-                         largepage, gfn, pfn, kvm_x86_ops->get_tdp_level());
+                         largepage, gfn, pfn, TDP_ROOT_LEVEL);
         spin_unlock(&vcpu->kvm->mmu_lock);
 
         return r;
@@ -1469,7 +1484,7 @@ static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
         context->page_fault = tdp_page_fault;
         context->free = nonpaging_free;
         context->prefetch_page = nonpaging_prefetch_page;
-        context->shadow_root_level = kvm_x86_ops->get_tdp_level();
+        context->shadow_root_level = TDP_ROOT_LEVEL;
         context->root_hpa = INVALID_PAGE;
 
         if (!is_paging(vcpu)) {
@@ -1618,7 +1633,7 @@ static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
 {
         u64 *spte = vcpu->arch.last_pte_updated;
 
-        return !!(spte && (*spte & shadow_accessed_mask));
+        return !!(spte && (*spte & PT_ACCESSED_MASK));
 }
 
 static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
37 changes: 5 additions & 32 deletions trunk/arch/x86/kvm/mmu.h
@@ -3,38 +3,11 @@
 
 #include <linux/kvm_host.h>
 
-#define PT64_PT_BITS 9
-#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
-#define PT32_PT_BITS 10
-#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)
-
-#define PT_WRITABLE_SHIFT 1
-
-#define PT_PRESENT_MASK (1ULL << 0)
-#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
-#define PT_USER_MASK (1ULL << 2)
-#define PT_PWT_MASK (1ULL << 3)
-#define PT_PCD_MASK (1ULL << 4)
-#define PT_ACCESSED_MASK (1ULL << 5)
-#define PT_DIRTY_MASK (1ULL << 6)
-#define PT_PAGE_SIZE_MASK (1ULL << 7)
-#define PT_PAT_MASK (1ULL << 7)
-#define PT_GLOBAL_MASK (1ULL << 8)
-#define PT64_NX_SHIFT 63
-#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)
-
-#define PT_PAT_SHIFT 7
-#define PT_DIR_PAT_SHIFT 12
-#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)
-
-#define PT32_DIR_PSE36_SIZE 4
-#define PT32_DIR_PSE36_SHIFT 13
-#define PT32_DIR_PSE36_MASK \
-        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)
-
-#define PT64_ROOT_LEVEL 4
-#define PT32_ROOT_LEVEL 2
-#define PT32E_ROOT_LEVEL 3
+#ifdef CONFIG_X86_64
+#define TDP_ROOT_LEVEL PT64_ROOT_LEVEL
+#else
+#define TDP_ROOT_LEVEL PT32E_ROOT_LEVEL
+#endif
 
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
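One block moving back into mmu.c deserves a gloss: with PSE36, a 4 MB page-directory entry stores physical-address bits 32..35 in PTE bits 13..16, which is what the PT32_DIR_PSE36_* defines describe. A standalone sketch of recovering that address contribution; the helper name is ours (KVM keeps a similar internal helper).

#include <stdint.h>

#define PT32_DIR_PSE36_SIZE  4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
        (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

/* Move PDE bits 13..16 up to physical-address bits 32..35. */
static uint64_t pse36_addr_bits(uint32_t pde)
{
        return (uint64_t)(pde & PT32_DIR_PSE36_MASK)
                << (32 - PT32_DIR_PSE36_SHIFT);
}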
10 changes: 0 additions & 10 deletions trunk/arch/x86/kvm/svm.c
@@ -1863,15 +1863,6 @@ static bool svm_cpu_has_accelerated_tpr(void)
         return false;
 }
 
-static int get_npt_level(void)
-{
-#ifdef CONFIG_X86_64
-        return PT64_ROOT_LEVEL;
-#else
-        return PT32E_ROOT_LEVEL;
-#endif
-}
-
 static struct kvm_x86_ops svm_x86_ops = {
         .cpu_has_kvm_support = has_svm,
         .disabled_by_bios = is_disabled,
@@ -1929,7 +1920,6 @@ static struct kvm_x86_ops svm_x86_ops = {
         .inject_pending_vectors = do_interrupt_requests,
 
         .set_tss_addr = svm_set_tss_addr,
-        .get_tdp_level = get_npt_level,
 };
 
 static int __init svm_init(void)