x86/mm: Randomize per-cpu entry area

Seth found that the CPU-entry-area, the piece of per-cpu data that is
mapped into the userspace page-tables for kPTI, is not subject to any
randomization -- irrespective of kASLR settings.

On x86_64 a whole P4D (512 GB) of virtual address space is reserved for
this structure, which is plenty large enough to randomize things a
little.
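
For a rough sense of scale (illustrative numbers, not from this
patch): the slot count is the randomizable size divided by the
per-CPU area size, so even a generously sized cpu_entry_area leaves
millions of candidate positions:

  max_cea = (P4D_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
  /* assuming ~256 KB per area: 512 GB / 256 KB == 2^21, ~2M slots */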

As such, use a straightforward randomization scheme that avoids
duplicates to spread the existing CPUs over the available space.
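
A minimal userspace sketch of the same avoid-duplicates idea (all
names and bounds here are made up; the kernel code below uses
prandom_u32_max() and per-cpu variables instead):

  #include <stdio.h>
  #include <stdlib.h>
  #include <time.h>

  #define NR_CPUS  8
  #define MAX_SLOT 1000000        /* stands in for max_cea */

  static unsigned int slot[NR_CPUS];

  int main(void)
  {
          unsigned int i, j;

          srand(time(NULL));

          for (i = 0; i < NR_CPUS; i++) {
  again:
                  slot[i] = rand() % MAX_SLOT;

                  /* O(n^2): re-roll on collision with an earlier pick */
                  for (j = 0; j < i; j++) {
                          if (slot[j] == slot[i])
                                  goto again;
                  }
          }

          for (i = 0; i < NR_CPUS; i++)
                  printf("cpu %u -> slot %u\n", i, slot[i]);

          return 0;
  }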

  [ bp: Fix le build. ]

Reported-by: Seth Jenkins <sethjenkins@google.com>
Reviewed-by: Kees Cook <keescook@chromium.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Peter Zijlstra authored and Dave Hansen committed Dec 15, 2022
1 parent 3f148f3 commit 97e3d26
Showing 4 changed files with 50 additions and 10 deletions.
4 changes: 0 additions & 4 deletions arch/x86/include/asm/cpu_entry_area.h
@@ -130,10 +130,6 @@ struct cpu_entry_area {
 };
 
 #define CPU_ENTRY_AREA_SIZE		(sizeof(struct cpu_entry_area))
-#define CPU_ENTRY_AREA_ARRAY_SIZE	(CPU_ENTRY_AREA_SIZE * NR_CPUS)
-
-/* Total size includes the readonly IDT mapping page as well: */
-#define CPU_ENTRY_AREA_TOTAL_SIZE	(CPU_ENTRY_AREA_ARRAY_SIZE + PAGE_SIZE)
 
 DECLARE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 DECLARE_PER_CPU(struct cea_exception_stacks *, cea_exception_stacks);
8 changes: 7 additions & 1 deletion arch/x86/include/asm/pgtable_areas.h
@@ -11,6 +11,12 @@
 
 #define CPU_ENTRY_AREA_RO_IDT_VADDR	((void *)CPU_ENTRY_AREA_RO_IDT)
 
-#define CPU_ENTRY_AREA_MAP_SIZE	(CPU_ENTRY_AREA_PER_CPU + CPU_ENTRY_AREA_ARRAY_SIZE - CPU_ENTRY_AREA_BASE)
+#ifdef CONFIG_X86_32
+#define CPU_ENTRY_AREA_MAP_SIZE		(CPU_ENTRY_AREA_PER_CPU +		\
+					 (CPU_ENTRY_AREA_SIZE * NR_CPUS) -	\
+					 CPU_ENTRY_AREA_BASE)
+#else
+#define CPU_ENTRY_AREA_MAP_SIZE		P4D_SIZE
+#endif
 
 #endif /* _ASM_X86_PGTABLE_AREAS_H */
2 changes: 1 addition & 1 deletion arch/x86/kernel/hw_breakpoint.c
@@ -266,7 +266,7 @@ static inline bool within_cpu_entry(unsigned long addr, unsigned long end)
 
 	/* CPU entry erea is always used for CPU entry */
 	if (within_area(addr, end, CPU_ENTRY_AREA_BASE,
-			CPU_ENTRY_AREA_TOTAL_SIZE))
+			CPU_ENTRY_AREA_MAP_SIZE))
 		return true;
 
 	/*
46 changes: 42 additions & 4 deletions arch/x86/mm/cpu_entry_area.c
@@ -16,16 +16,53 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(struct entry_stack_page, entry_stack_storage)
 #ifdef CONFIG_X86_64
 static DEFINE_PER_CPU_PAGE_ALIGNED(struct exception_stacks, exception_stacks);
 DEFINE_PER_CPU(struct cea_exception_stacks*, cea_exception_stacks);
-#endif
 
-#ifdef CONFIG_X86_32
+static DEFINE_PER_CPU_READ_MOSTLY(unsigned long, _cea_offset);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return per_cpu(_cea_offset, cpu);
+}
+
+static __init void init_cea_offsets(void)
+{
+	unsigned int max_cea;
+	unsigned int i, j;
+
+	max_cea = (CPU_ENTRY_AREA_MAP_SIZE - PAGE_SIZE) / CPU_ENTRY_AREA_SIZE;
+
+	/* O(sodding terrible) */
+	for_each_possible_cpu(i) {
+		unsigned int cea;
+
+again:
+		cea = prandom_u32_max(max_cea);
+
+		for_each_possible_cpu(j) {
+			if (cea_offset(j) == cea)
+				goto again;
+
+			if (i == j)
+				break;
+		}
+
+		per_cpu(_cea_offset, i) = cea;
+	}
+}
+#else /* !X86_64 */
 DECLARE_PER_CPU_PAGE_ALIGNED(struct doublefault_stack, doublefault_stack);
+
+static __always_inline unsigned int cea_offset(unsigned int cpu)
+{
+	return cpu;
+}
+static inline void init_cea_offsets(void) { }
 #endif
 
 /* Is called from entry code, so must be noinstr */
 noinstr struct cpu_entry_area *get_cpu_entry_area(int cpu)
 {
-	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cpu * CPU_ENTRY_AREA_SIZE;
+	unsigned long va = CPU_ENTRY_AREA_PER_CPU + cea_offset(cpu) * CPU_ENTRY_AREA_SIZE;
 	BUILD_BUG_ON(sizeof(struct cpu_entry_area) % PAGE_SIZE != 0);
 
 	return (struct cpu_entry_area *) va;
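
To see the effect of the hunk above (illustrative values, not from
the patch):

  /* before: constant for a given CPU, untouched by kASLR */
  va = CPU_ENTRY_AREA_PER_CPU + 2 * CPU_ENTRY_AREA_SIZE;
  /* after: slot is per-boot random, e.g. cea_offset(2) == 1370162 */
  va = CPU_ENTRY_AREA_PER_CPU + 1370162 * CPU_ENTRY_AREA_SIZE;
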
@@ -211,7 +248,6 @@ static __init void setup_cpu_entry_area_ptes(void)
 
 	/* The +1 is for the readonly IDT: */
 	BUILD_BUG_ON((CPU_ENTRY_AREA_PAGES+1)*PAGE_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
-	BUILD_BUG_ON(CPU_ENTRY_AREA_TOTAL_SIZE != CPU_ENTRY_AREA_MAP_SIZE);
 	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);
 
 	start = CPU_ENTRY_AREA_BASE;
@@ -227,6 +263,8 @@ void __init setup_cpu_entry_areas(void)
 {
 	unsigned int cpu;
 
+	init_cea_offsets();
+
 	setup_cpu_entry_area_ptes();
 
 	for_each_possible_cpu(cpu)
