kvm: x86: Set highest physical address bits in non-present/reserved SPTEs

Always set the 5 upper-most supported physical address bits to 1 for SPTEs
that are marked as non-present or reserved, to make them unusable for
L1TF attacks from the guest. Currently, this just applies to MMIO SPTEs.
(We do not need to mark PTEs that are completely 0 as physical page 0
is already reserved.)

This allows mitigation of L1TF without disabling hyper-threading by using
shadow paging mode instead of EPT.

Signed-off-by: Junaid Shahid <junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Junaid Shahid authored and Paolo Bonzini committed Aug 14, 2018
1 parent fd8ca6d commit 28a1f3a
Showing 2 changed files with 44 additions and 7 deletions.
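
Before the diff itself, a worked example of the mask this change installs. This is a minimal user-space sketch, not kernel code: rsvd_bits() is reimplemented here from its KVM definition, and the 46-bit physical address width is an assumed example value (the widest configuration the patch treats as L1TF-vulnerable).

    #include <stdint.h>
    #include <stdio.h>

    /* Same bit-range helper KVM uses: bits s..e inclusive, all set. */
    static uint64_t rsvd_bits(int s, int e)
    {
            return ((1ULL << (e - s + 1)) - 1) << s;
    }

    int main(void)
    {
            int phys_bits = 46;     /* assumed example; boot_cpu_data.x86_phys_bits in KVM */
            int mask_len = 5;       /* shadow_nonpresent_or_rsvd_mask_len */
            uint64_t mask = 0;

            /* Mirror of the check in kvm_mmu_reset_all_pte_masks() below. */
            if (phys_bits < 52 - mask_len)
                    mask = rsvd_bits(phys_bits - mask_len, phys_bits - 1);

            /* Prints 0x00003e0000000000: bits 41..45 set, so any SPTE
             * carrying this mask points into the top 1/32 of the physical
             * address space, where no cacheable RAM is expected to live. */
            printf("shadow_nonpresent_or_rsvd_mask = 0x%016llx\n",
                   (unsigned long long)mask);
            return 0;
    }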
43 changes: 38 additions & 5 deletions arch/x86/kvm/mmu.c
@@ -238,6 +238,17 @@ static const u64 shadow_acc_track_saved_bits_mask = PT64_EPT_READABLE_MASK |
         PT64_EPT_EXECUTABLE_MASK;
 static const u64 shadow_acc_track_saved_bits_shift = PT64_SECOND_AVAIL_BITS_SHIFT;
 
+/*
+ * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
+ * to guard against L1TF attacks.
+ */
+static u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
+
+/*
+ * The number of high-order 1 bits to use in the mask above.
+ */
+static const u64 shadow_nonpresent_or_rsvd_mask_len = 5;
+
 static void mmu_spte_set(u64 *sptep, u64 spte);
 static union kvm_mmu_page_role
 kvm_mmu_calc_root_page_role(struct kvm_vcpu *vcpu);
@@ -327,9 +338,13 @@ static void mark_mmio_spte(struct kvm_vcpu *vcpu, u64 *sptep, u64 gfn,
 {
         unsigned int gen = kvm_current_mmio_generation(vcpu);
         u64 mask = generation_mmio_spte_mask(gen);
+        u64 gpa = gfn << PAGE_SHIFT;
 
         access &= ACC_WRITE_MASK | ACC_USER_MASK;
-        mask |= shadow_mmio_value | access | gfn << PAGE_SHIFT;
+        mask |= shadow_mmio_value | access;
+        mask |= gpa | shadow_nonpresent_or_rsvd_mask;
+        mask |= (gpa & shadow_nonpresent_or_rsvd_mask)
+                << shadow_nonpresent_or_rsvd_mask_len;
 
         trace_mark_mmio_spte(sptep, gfn, access, gen);
         mmu_spte_set(sptep, mask);
@@ -342,8 +357,14 @@ static bool is_mmio_spte(u64 spte)
 
 static gfn_t get_mmio_spte_gfn(u64 spte)
 {
-        u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask;
-        return (spte & ~mask) >> PAGE_SHIFT;
+        u64 mask = generation_mmio_spte_mask(MMIO_GEN_MASK) | shadow_mmio_mask |
+                   shadow_nonpresent_or_rsvd_mask;
+        u64 gpa = spte & ~mask;
+
+        gpa |= (spte >> shadow_nonpresent_or_rsvd_mask_len)
+               & shadow_nonpresent_or_rsvd_mask;
+
+        return gpa >> PAGE_SHIFT;
 }
 
 static unsigned get_mmio_spte_access(u64 spte)
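
Since the guest physical address stored in an MMIO SPTE can itself have bits inside the masked range, mark_mmio_spte() stashes those five GPA bits shadow_nonpresent_or_rsvd_mask_len positions higher before forcing the masked bits to 1, and get_mmio_spte_gfn() shifts them back down. A self-contained sketch of that round trip, reusing the assumed 46-bit example mask from above; the kernel version also strips the MMIO generation and shadow_mmio_mask bits, which this simplification folds into explicit clears:

    #include <assert.h>
    #include <stdint.h>

    #define MASK_LEN 5                              /* shadow_nonpresent_or_rsvd_mask_len */
    static const uint64_t rsvd_mask = 0x3e0000000000ULL;   /* bits 41..45, 46-bit example */

    static uint64_t encode_gpa(uint64_t gpa)
    {
            uint64_t spte = gpa | rsvd_mask;                /* force the masked bits to 1 */
            spte |= (gpa & rsvd_mask) << MASK_LEN;          /* stash the real bits in 46..50 */
            return spte;
    }

    static uint64_t decode_gpa(uint64_t spte)
    {
            uint64_t gpa = spte & ~rsvd_mask;               /* drop the forced-to-1 bits */
            gpa &= ~(rsvd_mask << MASK_LEN);                /* drop the stash field itself */
            gpa |= (spte >> MASK_LEN) & rsvd_mask;          /* restore the stashed bits */
            return gpa;
    }

    int main(void)
    {
            /* A page-aligned GPA with some bits inside the masked 41..45 range. */
            uint64_t gpa = 0x12345678000ULL | (0x15ULL << 41);

            assert(decode_gpa(encode_gpa(gpa)) == gpa);
            return 0;
    }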
@@ -400,7 +421,7 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
 
-static void kvm_mmu_clear_all_pte_masks(void)
+static void kvm_mmu_reset_all_pte_masks(void)
 {
         shadow_user_mask = 0;
         shadow_accessed_mask = 0;
@@ -410,6 +431,18 @@ static void kvm_mmu_clear_all_pte_masks(void)
         shadow_mmio_mask = 0;
         shadow_present_mask = 0;
         shadow_acc_track_mask = 0;
+
+        /*
+         * If the CPU has 46 or less physical address bits, then set an
+         * appropriate mask to guard against L1TF attacks. Otherwise, it is
+         * assumed that the CPU is not vulnerable to L1TF.
+         */
+        if (boot_cpu_data.x86_phys_bits <
+            52 - shadow_nonpresent_or_rsvd_mask_len)
+                shadow_nonpresent_or_rsvd_mask =
+                        rsvd_bits(boot_cpu_data.x86_phys_bits -
+                                  shadow_nonpresent_or_rsvd_mask_len,
+                                  boot_cpu_data.x86_phys_bits - 1);
 }
 
 static int is_cpuid_PSE36(void)
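
With the assumed 46-bit width from the sketches above, the guard 46 < 52 - 5 holds and rsvd_bits(41, 45) yields exactly the 0x3e0000000000 value used there; the bound also guarantees that the copy mark_mmio_spte() shifts left by shadow_nonpresent_or_rsvd_mask_len still fits below bit 52, the architectural limit of the physical-address field in a PTE.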
@@ -5819,7 +5852,7 @@ int kvm_mmu_module_init(void)
 {
         int ret = -ENOMEM;
 
-        kvm_mmu_clear_all_pte_masks();
+        kvm_mmu_reset_all_pte_masks();
 
         pte_list_desc_cache = kmem_cache_create("pte_list_desc",
                                                 sizeof(struct pte_list_desc),
8 changes: 6 additions & 2 deletions arch/x86/kvm/x86.c
@@ -6536,8 +6536,12 @@ static void kvm_set_mmio_spte_mask(void)
          * Set the reserved bits and the present bit of an paging-structure
          * entry to generate page fault with PFER.RSV = 1.
          */
-        /* Mask the reserved physical address bits. */
-        mask = rsvd_bits(maxphyaddr, 51);
+
+        /*
+         * Mask the uppermost physical address bit, which would be reserved as
+         * long as the supported physical address width is less than 52.
+         */
+        mask = 1ull << 51;
 
         /* Set the present bit. */
         mask |= 1ull;
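
The switch from rsvd_bits(maxphyaddr, 51) to a single bit matters because the bits between maxphyaddr and 51 may now carry the stashed GPA copy written by mark_mmio_spte(); bit 51 alone stays reserved whenever the supported width is below 52. A quick sketch of the resulting value, assuming maxphyaddr < 52:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t mask;

            mask = 1ull << 51;      /* reserved PA bit as long as maxphyaddr < 52 */
            mask |= 1ull;           /* present bit */

            /* Prints 0x0008000000000001: present + reserved, so a walk
             * hitting this SPTE faults with PFERR.RSV = 1. */
            printf("mmio spte mask = 0x%016llx\n", (unsigned long long)mask);
            return 0;
    }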
