x86/mce: relocate set{clear}_mce_nospec() functions
Relocate the twin mce functions to arch/x86/mm/pat/set_memory.c,
where they belong.

While at it, fix up a function name in a comment.

Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
Signed-off-by: Jane Chu <jane.chu@oracle.com>
Acked-by: Borislav Petkov <bp@suse.de>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
[sfr: gate {set,clear}_mce_nospec() by CONFIG_X86_64]
Link: https://lore.kernel.org/r/165272527328.90175.8336008202048685278.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Jane Chu authored and Dan Williams committed May 16, 2022
1 parent 7917f9c commit b3fdf93
Showing 3 changed files with 52 additions and 58 deletions.
arch/x86/include/asm/set_memory.h: 0 additions, 52 deletions
@@ -86,56 +86,4 @@ bool kernel_page_present(struct page *page);
 
 extern int kernel_set_to_readonly;
 
-#ifdef CONFIG_X86_64
-/*
- * Prevent speculative access to the page by either unmapping
- * it (if we do not require access to any part of the page) or
- * marking it uncacheable (if we want to try to retrieve data
- * from non-poisoned lines in the page).
- */
-static inline int set_mce_nospec(unsigned long pfn, bool unmap)
-{
-	unsigned long decoy_addr;
-	int rc;
-
-	/* SGX pages are not in the 1:1 map */
-	if (arch_is_platform_page(pfn << PAGE_SHIFT))
-		return 0;
-	/*
-	 * We would like to just call:
-	 *	set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
-	 * but doing that would radically increase the odds of a
-	 * speculative access to the poison page because we'd have
-	 * the virtual address of the kernel 1:1 mapping sitting
-	 * around in registers.
-	 * Instead we get tricky. We create a non-canonical address
-	 * that looks just like the one we want, but has bit 63 flipped.
-	 * This relies on set_memory_XX() properly sanitizing any __pa()
-	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
-	 */
-	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
-
-	if (unmap)
-		rc = set_memory_np(decoy_addr, 1);
-	else
-		rc = set_memory_uc(decoy_addr, 1);
-	if (rc)
-		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
-	return rc;
-}
-#define set_mce_nospec set_mce_nospec
-
-/* Restore full speculative operation to the pfn. */
-static inline int clear_mce_nospec(unsigned long pfn)
-{
-	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
-}
-#define clear_mce_nospec clear_mce_nospec
-#else
-/*
- * Few people would run a 32-bit kernel on a machine that supports
- * recoverable errors because they have too much memory to boot 32-bit.
- */
-#endif
-
 #endif /* _ASM_X86_SET_MEMORY_H */
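
As an aside for readers of the comment above, here is a small standalone illustration (not part of the commit, and not kernel code) of why the decoy address works: flipping bit 63 of PAGE_OFFSET produces a non-canonical address, so a valid kernel 1:1 virtual address for the poisoned page is never materialized in registers, yet masking the derived physical address still recovers the original pfn. The PAGE_OFFSET value and the 52-bit physical mask below are assumed, typical x86_64 values.

#include <stdio.h>

#define PAGE_SHIFT	12
#define BIT(n)		(1UL << (n))
/* Assumed example values: typical x86_64 direct-map base and physical mask */
#define PAGE_OFFSET	0xffff888000000000UL
#define PHYSICAL_MASK	((1UL << 52) - 1)

int main(void)
{
	unsigned long pfn = 0x12345;	/* example poisoned page frame */
	unsigned long kaddr = PAGE_OFFSET + (pfn << PAGE_SHIFT);	/* real 1:1 address */
	unsigned long decoy = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));

	/* The decoy differs from the real address only in bit 63, which makes
	 * it non-canonical on x86_64. */
	printf("kaddr = %#lx\ndecoy = %#lx\n", kaddr, decoy);

	/* A naive __pa()-style subtraction leaves the flipped bit behind, but
	 * masking with the physical mask (the sanitizing step the comment
	 * refers to) recovers the original physical address and pfn. */
	unsigned long fake_pa = decoy - PAGE_OFFSET;
	unsigned long real_pa = fake_pa & PHYSICAL_MASK;
	printf("fake_pa = %#lx, sanitized pa = %#lx, pfn = %#lx\n",
	       fake_pa, real_pa, real_pa >> PAGE_SHIFT);
	return 0;
}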
arch/x86/mm/pat/set_memory.c: 48 additions, 2 deletions
@@ -19,6 +19,7 @@
 #include <linux/vmstat.h>
 #include <linux/kernel.h>
 #include <linux/cc_platform.h>
+#include <linux/set_memory.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -29,7 +30,6 @@
 #include <asm/pgalloc.h>
 #include <asm/proto.h>
 #include <asm/memtype.h>
-#include <asm/set_memory.h>
 #include <asm/hyperv-tlfs.h>
 #include <asm/mshyperv.h>
 
@@ -1816,7 +1816,7 @@ static inline int cpa_clear_pages_array(struct page **pages, int numpages,
 }
 
 /*
- * _set_memory_prot is an internal helper for callers that have been passed
+ * __set_memory_prot is an internal helper for callers that have been passed
  * a pgprot_t value from upper layers and a reservation has already been taken.
  * If you want to set the pgprot to a specific page protocol, use the
  * set_memory_xx() functions.
@@ -1925,6 +1925,52 @@ int set_memory_wb(unsigned long addr, int numpages)
 }
 EXPORT_SYMBOL(set_memory_wb);
 
+/*
+ * Prevent speculative access to the page by either unmapping
+ * it (if we do not require access to any part of the page) or
+ * marking it uncacheable (if we want to try to retrieve data
+ * from non-poisoned lines in the page).
+ */
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn, bool unmap)
+{
+	unsigned long decoy_addr;
+	int rc;
+
+	/* SGX pages are not in the 1:1 map */
+	if (arch_is_platform_page(pfn << PAGE_SHIFT))
+		return 0;
+	/*
+	 * We would like to just call:
+	 *	set_memory_XX((unsigned long)pfn_to_kaddr(pfn), 1);
+	 * but doing that would radically increase the odds of a
+	 * speculative access to the poison page because we'd have
+	 * the virtual address of the kernel 1:1 mapping sitting
+	 * around in registers.
+	 * Instead we get tricky. We create a non-canonical address
+	 * that looks just like the one we want, but has bit 63 flipped.
+	 * This relies on set_memory_XX() properly sanitizing any __pa()
+	 * results with __PHYSICAL_MASK or PTE_PFN_MASK.
+	 */
+	decoy_addr = (pfn << PAGE_SHIFT) + (PAGE_OFFSET ^ BIT(63));
+
+	if (unmap)
+		rc = set_memory_np(decoy_addr, 1);
+	else
+		rc = set_memory_uc(decoy_addr, 1);
+	if (rc)
+		pr_warn("Could not invalidate pfn=0x%lx from 1:1 map\n", pfn);
+	return rc;
+}
+
+/* Restore full speculative operation to the pfn. */
+int clear_mce_nospec(unsigned long pfn)
+{
+	return set_memory_wb((unsigned long) pfn_to_kaddr(pfn), 1);
+}
+EXPORT_SYMBOL_GPL(clear_mce_nospec);
+#endif /* CONFIG_X86_64 */
+
 int set_memory_x(unsigned long addr, int numpages)
 {
 	if (!(__supported_pte_mask & _PAGE_NX))
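
With the two helpers now defined out of line here, callers only need <linux/set_memory.h>. Below is a hedged sketch (not from this commit; the surrounding function names are hypothetical) of how error-handling code might use the pair: unmap the 1:1 mapping when nothing on the page needs to be read, keep it mapped but uncacheable when trying to salvage data from unpoisoned cache lines, and restore write-back caching once the page is clean again.

#include <linux/set_memory.h>
#include <linux/printk.h>

/* Hypothetical caller, for illustration only */
static void quarantine_poisoned_pfn(unsigned long pfn, bool salvage)
{
	/* unmap entirely unless some data should still be retrieved */
	if (set_mce_nospec(pfn, !salvage))
		pr_warn("pfn %#lx may remain speculatively reachable\n", pfn);
}

/* Hypothetical caller: the page has been repaired or retired, undo it */
static void unquarantine_pfn(unsigned long pfn)
{
	clear_mce_nospec(pfn);
}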
include/linux/set_memory.h: 4 additions, 4 deletions
@@ -42,14 +42,14 @@ static inline bool can_set_direct_map(void)
 #endif
 #endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */
 
-#ifndef set_mce_nospec
+#ifdef CONFIG_X86_64
+int set_mce_nospec(unsigned long pfn, bool unmap);
+int clear_mce_nospec(unsigned long pfn);
+#else
 static inline int set_mce_nospec(unsigned long pfn, bool unmap)
 {
 	return 0;
 }
-#endif
-
-#ifndef clear_mce_nospec
 static inline int clear_mce_nospec(unsigned long pfn)
 {
 	return 0;
