KVM: x86/mmu: remove "bool base_only" arguments
The argument is always false now that kvm_mmu_calc_root_page_role has
been removed.

Reviewed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Paolo Bonzini committed Apr 29, 2022
commit b898050, 1 parent 6819af7
Showing 1 changed file with 23 additions and 43 deletions.
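For context on why the flag could go: when base_only was true, kvm_calc_mmu_role_common returned after filling only the base role and skipped the extended-role bits; with kvm_mmu_calc_root_page_role gone, every remaining caller passes false, so the early return is dead code and the parameter goes with it. Below is a minimal, self-contained sketch of that cleanup pattern, using made-up stand-in types and field layouts rather than the kernel's (the real change follows in the diff):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for union kvm_mmu_role: a base part plus extended bits. */
struct mmu_role { unsigned base; unsigned ext; };

/* Old shape: base_only let a caller skip the extended-role computation. */
static struct mmu_role calc_role_old(unsigned regs, bool base_only)
{
        struct mmu_role role = { .base = regs & 0xf, .ext = 0 };

        if (base_only)          /* no remaining caller passes true,    */
                return role;    /* so this early return is unreachable */

        role.ext = regs >> 4;
        return role;
}

/* New shape: with the last base_only=true caller gone, the flag and the
 * early return are simply dropped. */
static struct mmu_role calc_role_new(unsigned regs)
{
        struct mmu_role role = { .base = regs & 0xf, .ext = regs >> 4 };

        return role;
}

int main(void)
{
        /* Every old call site passed false, so behavior is unchanged. */
        struct mmu_role before = calc_role_old(0x35, false);
        struct mmu_role after  = calc_role_new(0x35);

        printf("before: base=%u ext=%u\n", before.base, before.ext);
        printf("after:  base=%u ext=%u\n", after.base, after.ext);
        return 0;
}

The same reasoning applies at each call site in the diff: init_kvm_tdp_mmu, kvm_init_shadow_mmu, kvm_calc_shadow_npt_root_page_role and kvm_calc_nested_mmu_role all passed false unconditionally.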
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4748,47 +4748,30 @@ static void paging32_init_context(struct kvm_mmu *context)
         context->direct_map = false;
 }
 
-static union kvm_mmu_extended_role kvm_calc_mmu_role_ext(struct kvm_vcpu *vcpu,
-                                                         const struct kvm_mmu_role_regs *regs)
-{
-        union kvm_mmu_extended_role ext = {0};
-
-        if (____is_cr0_pg(regs)) {
-                ext.cr0_pg = 1;
-                ext.cr4_pae = ____is_cr4_pae(regs);
-                ext.cr4_smep = ____is_cr4_smep(regs);
-                ext.cr4_smap = ____is_cr4_smap(regs);
-                ext.cr4_pse = ____is_cr4_pse(regs);
-
-                /* PKEY and LA57 are active iff long mode is active. */
-                ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
-                ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
-                ext.efer_lma = ____is_efer_lma(regs);
-        }
-
-        ext.valid = 1;
-
-        return ext;
-}
-
 static union kvm_mmu_role kvm_calc_mmu_role_common(struct kvm_vcpu *vcpu,
-                                                   const struct kvm_mmu_role_regs *regs,
-                                                   bool base_only)
+                                                   const struct kvm_mmu_role_regs *regs)
 {
         union kvm_mmu_role role = {0};
 
         role.base.access = ACC_ALL;
         if (____is_cr0_pg(regs)) {
+                role.ext.cr0_pg = 1;
                 role.base.efer_nx = ____is_efer_nx(regs);
                 role.base.cr0_wp = ____is_cr0_wp(regs);
+
+                role.ext.cr4_pae = ____is_cr4_pae(regs);
+                role.ext.cr4_smep = ____is_cr4_smep(regs);
+                role.ext.cr4_smap = ____is_cr4_smap(regs);
+                role.ext.cr4_pse = ____is_cr4_pse(regs);
+
+                /* PKEY and LA57 are active iff long mode is active. */
+                role.ext.cr4_pke = ____is_efer_lma(regs) && ____is_cr4_pke(regs);
+                role.ext.cr4_la57 = ____is_efer_lma(regs) && ____is_cr4_la57(regs);
+                role.ext.efer_lma = ____is_efer_lma(regs);
         }
         role.base.smm = is_smm(vcpu);
         role.base.guest_mode = is_guest_mode(vcpu);
-
-        if (base_only)
-                return role;
-
-        role.ext = kvm_calc_mmu_role_ext(vcpu, regs);
+        role.ext.valid = 1;
 
         return role;
 }
@@ -4808,10 +4791,9 @@ static inline int kvm_mmu_get_tdp_level(struct kvm_vcpu *vcpu)
 
 static union kvm_mmu_role
 kvm_calc_tdp_mmu_root_page_role(struct kvm_vcpu *vcpu,
-                                const struct kvm_mmu_role_regs *regs,
-                                bool base_only)
+                                const struct kvm_mmu_role_regs *regs)
 {
-        union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
+        union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
 
         role.base.ad_disabled = (shadow_accessed_mask == 0);
         role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -4826,7 +4808,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 {
         struct kvm_mmu *context = &vcpu->arch.root_mmu;
         union kvm_mmu_role new_role =
-                kvm_calc_tdp_mmu_root_page_role(vcpu, regs, false);
+                kvm_calc_tdp_mmu_root_page_role(vcpu, regs);
 
         if (new_role.as_u64 == context->mmu_role.as_u64)
                 return;
@@ -4855,10 +4837,9 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
-                                      const struct kvm_mmu_role_regs *regs,
-                                      bool base_only)
+                                      const struct kvm_mmu_role_regs *regs)
 {
-        union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs, base_only);
+        union kvm_mmu_role role = kvm_calc_mmu_role_common(vcpu, regs);
 
         role.base.smep_andnot_wp = role.ext.cr4_smep && !____is_cr0_wp(regs);
         role.base.smap_andnot_wp = role.ext.cr4_smap && !____is_cr0_wp(regs);
@@ -4869,11 +4850,10 @@ kvm_calc_shadow_root_page_role_common(struct kvm_vcpu *vcpu,
 
 static union kvm_mmu_role
 kvm_calc_shadow_mmu_root_page_role(struct kvm_vcpu *vcpu,
-                                   const struct kvm_mmu_role_regs *regs,
-                                   bool base_only)
+                                   const struct kvm_mmu_role_regs *regs)
 {
         union kvm_mmu_role role =
-                kvm_calc_shadow_root_page_role_common(vcpu, regs, base_only);
+                kvm_calc_shadow_root_page_role_common(vcpu, regs);
 
         role.base.direct = !____is_cr0_pg(regs);
 
@@ -4915,7 +4895,7 @@ static void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu,
 {
         struct kvm_mmu *context = &vcpu->arch.root_mmu;
         union kvm_mmu_role new_role =
-                kvm_calc_shadow_mmu_root_page_role(vcpu, regs, false);
+                kvm_calc_shadow_mmu_root_page_role(vcpu, regs);
 
         shadow_mmu_init_context(vcpu, context, regs, new_role);
 }
@@ -4925,7 +4905,7 @@ kvm_calc_shadow_npt_root_page_role(struct kvm_vcpu *vcpu,
                                    const struct kvm_mmu_role_regs *regs)
 {
         union kvm_mmu_role role =
-                kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+                kvm_calc_shadow_root_page_role_common(vcpu, regs);
 
         role.base.direct = false;
         role.base.level = kvm_mmu_get_tdp_level(vcpu);
@@ -5026,7 +5006,7 @@ kvm_calc_nested_mmu_role(struct kvm_vcpu *vcpu, const struct kvm_mmu_role_regs *
 {
         union kvm_mmu_role role;
 
-        role = kvm_calc_shadow_root_page_role_common(vcpu, regs, false);
+        role = kvm_calc_shadow_root_page_role_common(vcpu, regs);
 
         /*
          * Nested MMUs are used only for walking L2's gva->gpa, they never have
