Skip to content

Commit

Permalink
kvm: x86: Move kvm_set_mmio_spte_mask() from x86.c to mmu.c
Browse files Browse the repository at this point in the history
[ Upstream commit 7b6f8a0 ]

As a prerequisite to fixing several SPTE reserved-bit calculation errors
caused by MKTME, move kvm_set_mmio_spte_mask() from x86.c to mmu.c so that
it can use a local static variable defined in mmu.c.

Also move call site of kvm_set_mmio_spte_mask() from kvm_arch_init() to
kvm_mmu_module_init() so that kvm_set_mmio_spte_mask() can be static.

Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
  • Loading branch information
Kai Huang authored and Greg Kroah-Hartman committed Jun 25, 2020
1 parent 0bd807d commit f4fd2ea
Show file tree
Hide file tree
Showing 2 changed files with 31 additions and 31 deletions.
31 changes: 31 additions & 0 deletions arch/x86/kvm/mmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -5675,13 +5675,44 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
return 0;
}

/*
 * Compute the SPTE value used to flag MMIO accesses and install it via
 * kvm_mmu_set_mmio_spte_mask(). The chosen value makes an MMIO SPTE
 * fault with a distinctive, recognizable page-fault error code.
 */
static void kvm_set_mmio_spte_mask(void)
{
u64 mask;
/* Physical address width reported by the CPU (CPUID-derived). */
int maxphyaddr = boot_cpu_data.x86_phys_bits;

/*
 * Set the reserved bits and the present bit of a paging-structure
 * entry to generate a page fault with PFERR.RSVD = 1, which uniquely
 * identifies MMIO accesses.
 */

/*
 * Set the uppermost physical address bit (bit 51), which is a
 * reserved bit as long as the supported physical address width is
 * less than 52.
 */
mask = 1ull << 51;

/* Set the present bit. */
mask |= 1ull;

/*
 * If maxphyaddr is 52, no physical address bit is reserved, so the
 * reserved-bit trick cannot work; clear the present bit to disable
 * the MMIO page-fault fast path instead (a not-present SPTE still
 * faults, just without PFERR.RSVD).
 */
if (maxphyaddr == 52)
mask &= ~1ull;

kvm_mmu_set_mmio_spte_mask(mask, mask);
}

int kvm_mmu_module_init(void)
{
if (nx_huge_pages == -1)
__set_nx_huge_pages(get_nx_auto_mode());

kvm_mmu_reset_all_pte_masks();

kvm_set_mmio_spte_mask();

pte_list_desc_cache = kmem_cache_create("pte_list_desc",
sizeof(struct pte_list_desc),
0, SLAB_ACCOUNT, NULL);
Expand Down
31 changes: 0 additions & 31 deletions arch/x86/kvm/x86.c
Original file line number Diff line number Diff line change
Expand Up @@ -6291,35 +6291,6 @@ void kvm_after_handle_nmi(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_after_handle_nmi);

/*
 * Compute the SPTE value used to flag MMIO accesses and install it via
 * kvm_mmu_set_mmio_spte_mask(). The chosen value makes an MMIO SPTE
 * fault with a distinctive, recognizable page-fault error code.
 */
static void kvm_set_mmio_spte_mask(void)
{
u64 mask;
/* Physical address width reported by the CPU (CPUID-derived). */
int maxphyaddr = boot_cpu_data.x86_phys_bits;

/*
 * Set the reserved bits and the present bit of a paging-structure
 * entry to generate a page fault with PFERR.RSVD = 1, which uniquely
 * identifies MMIO accesses.
 */

/*
 * Set the uppermost physical address bit (bit 51), which is a
 * reserved bit as long as the supported physical address width is
 * less than 52.
 */
mask = 1ull << 51;

/* Set the present bit. */
mask |= 1ull;

/*
 * If maxphyaddr is 52, no physical address bit is reserved, so the
 * reserved-bit trick cannot work; clear the present bit to disable
 * the MMIO page-fault fast path instead (a not-present SPTE still
 * faults, just without PFERR.RSVD).
 */
if (maxphyaddr == 52)
mask &= ~1ull;

kvm_mmu_set_mmio_spte_mask(mask, mask);
}

#ifdef CONFIG_X86_64
static void pvclock_gtod_update_fn(struct work_struct *work)
{
Expand Down Expand Up @@ -6397,8 +6368,6 @@ int kvm_arch_init(void *opaque)
if (r)
goto out_free_percpu;

kvm_set_mmio_spte_mask();

kvm_x86_ops = ops;

kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,
Expand Down

0 comments on commit f4fd2ea

Please sign in to comment.