KVM: x86/mmu: Make all page fault handlers internal to the MMU
Move kvm_arch_async_page_ready() to mmu.c where it belongs, and move all
of the page fault handling collateral that was in mmu.h purely for the
async #PF handler into mmu_internal.h, where it belongs.  This will allow
kvm_mmu_do_page_fault() to act on the RET_PF_* return without having to
expose those enums outside of the MMU.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220423034752.1161007-8-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Sean Christopherson authored and Paolo Bonzini committed May 12, 2022
1 parent 5276c61 · commit 8a009d5
Showing 4 changed files with 108 additions and 107 deletions.
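To make the changelog's point concrete, here is a hypothetical sketch — not part of this commit, with an invented helper name — of an MMU-internal caller acting on the RET_PF_* return of kvm_mmu_do_page_fault(); the RET_PF_* values are those declared in mmu_internal.h below:

/*
 * Hypothetical example, not kernel code: with RET_PF_* and
 * kvm_mmu_do_page_fault() both private to the MMU, a helper in mmu.c
 * can branch on the return value without exposing the enum.
 */
static int example_prefault_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	int r = kvm_mmu_do_page_fault(vcpu, gpa, 0, true);

	switch (r) {
	case RET_PF_RETRY:	/* transient failure, let the vCPU refault */
	case RET_PF_SPURIOUS:	/* fault was already fixed, nothing to do */
		return 0;
	default:
		return r;	/* propagate anything else unchanged */
	}
}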
87 changes: 0 additions & 87 deletions arch/x86/kvm/mmu.h
@@ -141,93 +141,6 @@ static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
 					  vcpu->arch.mmu->root_role.level);
 }
 
-struct kvm_page_fault {
-	/* arguments to kvm_mmu_do_page_fault. */
-	const gpa_t addr;
-	const u32 error_code;
-	const bool prefetch;
-
-	/* Derived from error_code. */
-	const bool exec;
-	const bool write;
-	const bool present;
-	const bool rsvd;
-	const bool user;
-
-	/* Derived from mmu and global state. */
-	const bool is_tdp;
-	const bool nx_huge_page_workaround_enabled;
-
-	/*
-	 * Whether a >4KB mapping can be created or is forbidden due to NX
-	 * hugepages.
-	 */
-	bool huge_page_disallowed;
-
-	/*
-	 * Maximum page size that can be created for this fault; input to
-	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
-	 */
-	u8 max_level;
-
-	/*
-	 * Page size that can be created based on the max_level and the
-	 * page size used by the host mapping.
-	 */
-	u8 req_level;
-
-	/*
-	 * Page size that will be created based on the req_level and
-	 * huge_page_disallowed.
-	 */
-	u8 goal_level;
-
-	/* Shifted addr, or result of guest page table walk if addr is a gva. */
-	gfn_t gfn;
-
-	/* The memslot containing gfn. May be NULL. */
-	struct kvm_memory_slot *slot;
-
-	/* Outputs of kvm_faultin_pfn. */
-	kvm_pfn_t pfn;
-	hva_t hva;
-	bool map_writable;
-};
-
-int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
-
-extern int nx_huge_pages;
-static inline bool is_nx_huge_page_enabled(void)
-{
-	return READ_ONCE(nx_huge_pages);
-}
-
-static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
-					u32 err, bool prefetch)
-{
-	struct kvm_page_fault fault = {
-		.addr = cr2_or_gpa,
-		.error_code = err,
-		.exec = err & PFERR_FETCH_MASK,
-		.write = err & PFERR_WRITE_MASK,
-		.present = err & PFERR_PRESENT_MASK,
-		.rsvd = err & PFERR_RSVD_MASK,
-		.user = err & PFERR_USER_MASK,
-		.prefetch = prefetch,
-		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
-		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
-
-		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
-		.req_level = PG_LEVEL_4K,
-		.goal_level = PG_LEVEL_4K,
-	};
-#ifdef CONFIG_RETPOLINE
-	if (fault.is_tdp)
-		return kvm_tdp_page_fault(vcpu, &fault);
-#endif
-	return vcpu->arch.mmu->page_fault(vcpu, &fault);
-}
-
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
19 changes: 19 additions & 0 deletions arch/x86/kvm/mmu/mmu.c
@@ -3942,6 +3942,25 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 				     kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+	int r;
+
+	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
+	      work->wakeup_all)
+		return;
+
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r))
+		return;
+
+	if (!vcpu->arch.mmu->root_role.direct &&
+	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
+		return;
+
+	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
+}
+
 static int kvm_faultin_pfn(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 {
 	struct kvm_memory_slot *slot = fault->slot;
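A note on the replayed fault above: kvm_arch_async_page_ready() passes error code 0 and prefetch == true to kvm_mmu_do_page_fault(). A minimal illustrative sketch of how that decodes, using only the PFERR_* masks visible in this diff (the function name is invented for the example):

/*
 * Illustrative sketch, restating kvm_mmu_do_page_fault()'s decoding for
 * the async-#PF replay: with err == 0, no PFERR_* bit is set, so every
 * error-code-derived boolean initializes to false, and prefetch == true
 * tells the MMU to treat the replay as a best-effort prefetch rather
 * than a guest-visible fault.
 */
static void example_decode_async_pf_replay(void)
{
	const u32 err = 0;			/* as passed by kvm_arch_async_page_ready() */
	bool exec    = err & PFERR_FETCH_MASK;	/* false */
	bool write   = err & PFERR_WRITE_MASK;	/* false */
	bool present = err & PFERR_PRESENT_MASK;/* false */
	bool rsvd    = err & PFERR_RSVD_MASK;	/* false */
	bool user    = err & PFERR_USER_MASK;	/* false */
}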
90 changes: 89 additions & 1 deletion arch/x86/kvm/mmu/mmu_internal.h
@@ -140,8 +140,70 @@ void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 				u64 start_gfn, u64 pages);
 unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);
 
+extern int nx_huge_pages;
+static inline bool is_nx_huge_page_enabled(void)
+{
+	return READ_ONCE(nx_huge_pages);
+}
+
+struct kvm_page_fault {
+	/* arguments to kvm_mmu_do_page_fault. */
+	const gpa_t addr;
+	const u32 error_code;
+	const bool prefetch;
+
+	/* Derived from error_code. */
+	const bool exec;
+	const bool write;
+	const bool present;
+	const bool rsvd;
+	const bool user;
+
+	/* Derived from mmu and global state. */
+	const bool is_tdp;
+	const bool nx_huge_page_workaround_enabled;
+
+	/*
+	 * Whether a >4KB mapping can be created or is forbidden due to NX
+	 * hugepages.
+	 */
+	bool huge_page_disallowed;
+
+	/*
+	 * Maximum page size that can be created for this fault; input to
+	 * FNAME(fetch), __direct_map and kvm_tdp_mmu_map.
+	 */
+	u8 max_level;
+
+	/*
+	 * Page size that can be created based on the max_level and the
+	 * page size used by the host mapping.
+	 */
+	u8 req_level;
+
+	/*
+	 * Page size that will be created based on the req_level and
+	 * huge_page_disallowed.
+	 */
+	u8 goal_level;
+
+	/* Shifted addr, or result of guest page table walk if addr is a gva. */
+	gfn_t gfn;
+
+	/* The memslot containing gfn. May be NULL. */
+	struct kvm_memory_slot *slot;
+
+	/* Outputs of kvm_faultin_pfn. */
+	kvm_pfn_t pfn;
+	hva_t hva;
+	bool map_writable;
+};
+
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
+
 /*
- * Return values of handle_mmio_page_fault, mmu.page_fault, and fast_page_fault().
+ * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
+ * and of course kvm_mmu_do_page_fault().
  *
  * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
  * RET_PF_RETRY: let CPU fault again on the address.
@@ -167,6 +229,32 @@ enum {
 	RET_PF_SPURIOUS,
 };
 
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+					u32 err, bool prefetch)
+{
+	struct kvm_page_fault fault = {
+		.addr = cr2_or_gpa,
+		.error_code = err,
+		.exec = err & PFERR_FETCH_MASK,
+		.write = err & PFERR_WRITE_MASK,
+		.present = err & PFERR_PRESENT_MASK,
+		.rsvd = err & PFERR_RSVD_MASK,
+		.user = err & PFERR_USER_MASK,
+		.prefetch = prefetch,
+		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
+		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
+
+		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
+		.req_level = PG_LEVEL_4K,
+		.goal_level = PG_LEVEL_4K,
+	};
+#ifdef CONFIG_RETPOLINE
+	if (fault.is_tdp)
+		return kvm_tdp_page_fault(vcpu, &fault);
+#endif
+	return vcpu->arch.mmu->page_fault(vcpu, &fault);
+}
+
 int kvm_mmu_max_mapping_level(struct kvm *kvm,
 			      const struct kvm_memory_slot *slot, gfn_t gfn,
 			      kvm_pfn_t pfn, int max_level);
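One design note on kvm_mmu_do_page_fault()'s CONFIG_RETPOLINE branch: with retpolines enabled, indirect calls go through a thunk and are comparatively expensive, so the common TDP case is devirtualized into a direct call once the pointer comparison proves the target. A self-contained sketch of that pattern — the handler type and names here are invented for illustration, not kernel code:

/*
 * Illustration of the devirtualization pattern used above; all names
 * below are made up for the example.
 */
typedef int (*fault_handler_t)(int token);

static int tdp_handler(int token)
{
	return token;	/* stand-in for kvm_tdp_page_fault() */
}

static int dispatch_fault(fault_handler_t handler, int token)
{
#ifdef CONFIG_RETPOLINE
	/*
	 * The pointer comparison proves the target, so call it directly
	 * and skip the retpoline thunk on the hot path.
	 */
	if (handler == tdp_handler)
		return tdp_handler(token);
#endif
	return handler(token);	/* generic indirect call */
}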
19 changes: 0 additions & 19 deletions arch/x86/kvm/x86.c
@@ -12358,25 +12358,6 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
-void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
-{
-	int r;
-
-	if ((vcpu->arch.mmu->root_role.direct != work->arch.direct_map) ||
-	      work->wakeup_all)
-		return;
-
-	r = kvm_mmu_reload(vcpu);
-	if (unlikely(r))
-		return;
-
-	if (!vcpu->arch.mmu->root_role.direct &&
-	      work->arch.cr3 != vcpu->arch.mmu->get_guest_pgd(vcpu))
-		return;
-
-	kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
-}
-
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
 {
 	BUILD_BUG_ON(!is_power_of_2(ASYNC_PF_PER_VCPU));
