Merge branch 'kvm-updates/2.6.34' of git://git.kernel.org/pub/scm/virt/kvm/kvm
* 'kvm-updates/2.6.34' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: x86: Fix TSS size check for 16-bit tasks
  KVM: Add missing srcu_read_lock() for kvm_mmu_notifier_release()
  KVM: Increase NR_IOBUS_DEVS limit to 200
  KVM: fix the handling of dirty bitmaps to avoid overflows
  KVM: MMU: fix kvm_mmu_zap_page() and its calling path
  KVM: VMX: Save/restore rflags.vm correctly in real mode
  KVM: allow bit 10 to be cleared in MSR_IA32_MC4_CTL
  KVM: Don't spam kernel log when injecting exceptions due to bad cr writes
  KVM: SVM: Fix memory leaks that happen when svm_create_vcpu() fails
  KVM: take srcu lock before call to complete_pio()
Linus Torvalds committed Apr 21, 2010
2 parents 1519ae4 + e8861cf commit a486b0a
Showing 8 changed files with 79 additions and 67 deletions.
9 changes: 5 additions & 4 deletions arch/ia64/kvm/kvm-ia64.c
@@ -1802,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
 {
        struct kvm_memory_slot *memslot;
        int r, i;
-       long n, base;
+       long base;
+       unsigned long n;
        unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
                        offsetof(struct kvm_vm_data, kvm_mem_dirty_log));

@@ -1815,7 +1816,7 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
        if (!memslot->dirty_bitmap)
                goto out;
 
-       n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+       n = kvm_dirty_bitmap_bytes(memslot);
        base = memslot->base_gfn / BITS_PER_LONG;
 
        for (i = 0; i < n/sizeof(long); ++i) {
@@ -1831,7 +1832,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                struct kvm_dirty_log *log)
 {
        int r;
-       int n;
+       unsigned long n;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;
 
@@ -1850,7 +1851,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        if (is_dirty) {
                kvm_flush_remote_tlbs(kvm);
                memslot = &kvm->memslots->memslots[log->slot];
-               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+               n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
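
Note on the dirty-bitmap fix: the open-coded size computation above is replaced
by a helper introduced in this series (the same substitution appears in the
powerpc hunks below). Reconstructed from the expression it replaces, the helper
presumably reduces to the sketch that follows; treat the exact placement and
signature as an assumption rather than a quote of the kernel source.

/* Sketch of kvm_dirty_bitmap_bytes(), reconstructed from the replaced
 * expression: one dirty bit per page, rounded up to whole longs.
 * Returning unsigned long instead of int is the overflow fix. */
unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
        return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}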
5 changes: 3 additions & 2 deletions arch/powerpc/kvm/book3s.c
@@ -1004,7 +1004,8 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
-       int r, n;
+       int r;
+       unsigned long n;
 
        mutex_lock(&kvm->slots_lock);

@@ -1022,7 +1023,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
 
-               n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
+               n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

11 changes: 7 additions & 4 deletions arch/x86/kvm/mmu.c
@@ -1490,8 +1490,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
                for_each_sp(pages, sp, parents, i) {
                        kvm_mmu_zap_page(kvm, sp);
                        mmu_pages_clear_parents(&parents);
-                       zapped++;
                }
+               zapped += pages.nr;
                kvm_mmu_pages_init(parent, &parents, &pages);
        }

@@ -1542,14 +1542,16 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
         */
 
        if (used_pages > kvm_nr_mmu_pages) {
-               while (used_pages > kvm_nr_mmu_pages) {
+               while (used_pages > kvm_nr_mmu_pages &&
+                       !list_empty(&kvm->arch.active_mmu_pages)) {
                        struct kvm_mmu_page *page;
 
                        page = container_of(kvm->arch.active_mmu_pages.prev,
                                            struct kvm_mmu_page, link);
-                       kvm_mmu_zap_page(kvm, page);
+                       used_pages -= kvm_mmu_zap_page(kvm, page);
                        used_pages--;
                }
+               kvm_nr_mmu_pages = used_pages;
                kvm->arch.n_free_mmu_pages = 0;
        }
        else
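
Why the loop changed: kvm_mmu_zap_page() can tear down unsync children in
addition to the page handed to it, so decrementing used_pages by one per
iteration undercounts what was freed, and the loop could keep running after
active_mmu_pages was drained. A self-contained toy model of the corrected
accounting (hypothetical names, not kernel code):

#include <stdio.h>

static int items = 10;

/* stand-in for kvm_mmu_zap_page(): destroys the victim plus up to two
 * children and, like the kernel function, returns only the number of
 * children it freed */
static int destroy_one(void)
{
        int freed = (items >= 3) ? 3 : items;

        items -= freed;
        return freed - 1;
}

int main(void)
{
        int used = 10, target = 4;

        while (used > target && items > 0) {    /* the list_empty() guard */
                used -= destroy_one();          /* children freed alongside */
                used--;                         /* ...plus the victim itself */
        }
        printf("used=%d items=%d\n", used, items);
        return 0;
}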
@@ -1596,7 +1598,8 @@ static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
                    && !sp->role.invalid) {
                        pgprintk("%s: zap %lx %x\n",
                                 __func__, gfn, sp->role.word);
-                       kvm_mmu_zap_page(kvm, sp);
+                       if (kvm_mmu_zap_page(kvm, sp))
+                               nn = bucket->first;
                }
        }
 }
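
The mmu_unshadow() hunk addresses the same root cause from the iterator's
side: the walk uses a safe hlist iterator whose saved next pointer can go
stale once kvm_mmu_zap_page() destroys entries other than sp itself. A sketch
of the enclosing loop under that assumption (the iterator variables and the
role tests are reconstructed context from that kernel era, not part of the
diff):

struct kvm_mmu_page *sp;
struct hlist_node *node, *nn;

hlist_for_each_entry_safe(sp, node, nn, bucket, hash_link) {
        if (sp->gfn == gfn && !sp->role.metaphysical
            && !sp->role.invalid) {
                if (kvm_mmu_zap_page(kvm, sp))
                        /* other bucket entries may be gone; restart walk */
                        nn = bucket->first;
        }
}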
25 changes: 15 additions & 10 deletions arch/x86/kvm/svm.c
@@ -706,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
        if (err)
                goto free_svm;
 
+       err = -ENOMEM;
        page = alloc_page(GFP_KERNEL);
-       if (!page) {
-               err = -ENOMEM;
+       if (!page)
                goto uninit;
-       }
 
-       err = -ENOMEM;
        msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!msrpm_pages)
-               goto uninit;
+               goto free_page1;
 
        nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
        if (!nested_msrpm_pages)
-               goto uninit;
-
-       svm->msrpm = page_address(msrpm_pages);
-       svm_vcpu_init_msrpm(svm->msrpm);
+               goto free_page2;
 
        hsave_page = alloc_page(GFP_KERNEL);
        if (!hsave_page)
-               goto uninit;
+               goto free_page3;
+
        svm->nested.hsave = page_address(hsave_page);
 
+       svm->msrpm = page_address(msrpm_pages);
+       svm_vcpu_init_msrpm(svm->msrpm);
+
        svm->nested.msrpm = page_address(nested_msrpm_pages);
 
        svm->vmcb = page_address(page);
@@ -744,6 +743,12 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 
        return &svm->vcpu;
 
+free_page3:
+       __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+       __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+       __free_page(page);
 uninit:
        kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
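The pattern behind the svm_create_vcpu() fix is the kernel's usual
reverse-order goto unwinding: each allocation's failure path jumps to a label
that frees exactly the objects obtained before it, so no path leaks and no
path double-frees. A minimal, self-contained illustration of the idiom
(hypothetical names, plain libc instead of the page allocator):

#include <stdio.h>
#include <stdlib.h>

static int create_object(void)
{
        void *a, *b, *c;

        a = malloc(64);
        if (!a)
                goto fail;
        b = malloc(64);
        if (!b)
                goto free_a;            /* only 'a' exists so far */
        c = malloc(64);
        if (!c)
                goto free_b;            /* 'a' and 'b' exist */

        printf("all allocations succeeded\n");
        free(c);
        free(b);
        free(a);
        return 0;

free_b:
        free(b);
free_a:
        free(a);
fail:
        return -1;                      /* stand-in for -ENOMEM */
}

int main(void)
{
        return create_object() ? 1 : 0;
}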
24 changes: 15 additions & 9 deletions arch/x86/kvm/vmx.c
Expand Up @@ -77,6 +77,8 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
#define KVM_PMODE_VM_CR4_ALWAYS_ON (X86_CR4_PAE | X86_CR4_VMXE)
#define KVM_RMODE_VM_CR4_ALWAYS_ON (X86_CR4_VME | X86_CR4_PAE | X86_CR4_VMXE)

#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

/*
* These 2 parameters are used to config the controls for Pause-Loop Exiting:
* ple_gap: upper bound on the amount of time between two successive
@@ -131,7 +133,7 @@ struct vcpu_vmx {
        } host_state;
        struct {
                int vm86_active;
-               u8 save_iopl;
+               ulong save_rflags;
                struct kvm_save_segment {
                        u16 selector;
                        unsigned long base;
@@ -818,18 +820,23 @@ static void vmx_fpu_deactivate(struct kvm_vcpu *vcpu)
 
 static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 {
-       unsigned long rflags;
+       unsigned long rflags, save_rflags;
 
        rflags = vmcs_readl(GUEST_RFLAGS);
-       if (to_vmx(vcpu)->rmode.vm86_active)
-               rflags &= ~(unsigned long)(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               rflags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+               save_rflags = to_vmx(vcpu)->rmode.save_rflags;
+               rflags |= save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
+       }
        return rflags;
 }
 
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
-       if (to_vmx(vcpu)->rmode.vm86_active)
+       if (to_vmx(vcpu)->rmode.vm86_active) {
+               to_vmx(vcpu)->rmode.save_rflags = rflags;
                rflags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;
+       }
        vmcs_writel(GUEST_RFLAGS, rflags);
 }

@@ -1483,8 +1490,8 @@ static void enter_pmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, vmx->rmode.tr.ar);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       flags &= ~(X86_EFLAGS_IOPL | X86_EFLAGS_VM);
-       flags |= (vmx->rmode.save_iopl << IOPL_SHIFT);
+       flags &= RMODE_GUEST_OWNED_EFLAGS_BITS;
+       flags |= vmx->rmode.save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS;
        vmcs_writel(GUEST_RFLAGS, flags);
 
        vmcs_writel(GUEST_CR4, (vmcs_readl(GUEST_CR4) & ~X86_CR4_VME) |
@@ -1557,8 +1564,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
        vmcs_write32(GUEST_TR_AR_BYTES, 0x008b);
 
        flags = vmcs_readl(GUEST_RFLAGS);
-       vmx->rmode.save_iopl
-               = (flags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
+       vmx->rmode.save_rflags = flags;
 
        flags |= X86_EFLAGS_IOPL | X86_EFLAGS_VM;

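What the vmx patch fixes: while a real-mode guest runs under vm86 emulation,
KVM forces IOPL and VM on in the hardware GUEST_RFLAGS, and previously it
remembered only the guest's IOPL value, so the guest-visible VM bit was lost
on the transition back to protected mode. Saving the whole rflags image makes
the round trip lossless for the KVM-owned bits. A self-contained sketch of
the masking arithmetic (EFLAGS constants written out by hand; the function
bodies mirror the diff but are not quotes of it):

#include <assert.h>
#include <stdio.h>

#define X86_EFLAGS_IOPL 0x3000UL        /* bits 12-13 */
#define X86_EFLAGS_VM   0x20000UL       /* bit 17 */
#define RMODE_GUEST_OWNED_EFLAGS_BITS (~(X86_EFLAGS_IOPL | X86_EFLAGS_VM))

static unsigned long save_rflags;       /* models vmx->rmode.save_rflags */

/* models vmx_set_rflags() while rmode.vm86_active */
static unsigned long set_rflags(unsigned long rflags)
{
        save_rflags = rflags;           /* keep the full image */
        return rflags | X86_EFLAGS_IOPL | X86_EFLAGS_VM;
}

/* models vmx_get_rflags() while rmode.vm86_active */
static unsigned long get_rflags(unsigned long vmcs_rflags)
{
        unsigned long rflags = vmcs_rflags & RMODE_GUEST_OWNED_EFLAGS_BITS;

        return rflags | (save_rflags & ~RMODE_GUEST_OWNED_EFLAGS_BITS);
}

int main(void)
{
        unsigned long guest = 0x1246;   /* arbitrary: IOPL=1, VM clear */
        unsigned long hw = set_rflags(guest);

        /* hardware always sees vm86: IOPL=3 and VM forced on */
        assert((hw & (X86_EFLAGS_IOPL | X86_EFLAGS_VM))
               == (X86_EFLAGS_IOPL | X86_EFLAGS_VM));
        /* the guest still reads back exactly what it wrote */
        assert(get_rflags(hw) == guest);
        printf("rflags round trip preserved: %#lx\n", get_rflags(hw));
        return 0;
}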
[diffs for the remaining 3 changed files were not loaded on this page]
