mm/execmem: Unify early execmem_cache behaviour

Early kernel memory is RWX; only at the end of early boot (before SMP)
do we mark things ROX. Have execmem_cache mirror this behaviour for
early users.

This avoids having to remember what code is execmem and what is not --
we can poke everything with impunity ;-) It also helps performance,
since we no longer have to do endless text_poke_mm switches.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Peter Zijlstra authored and Dave Hansen committed May 9, 2025
1 parent f0cd709 commit d6d1e3e
Showing 4 changed files with 50 additions and 4 deletions.
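
For orientation before the hunks: the commit message above boils down to a single boolean switch point. The toy userspace program below is not part of the commit and is not kernel code; it only models that policy so it can be compiled and run standalone. The names execmem_cache_rox, execmem_cache_populate() and execmem_cache_make_ro() are borrowed from the diff, while enum perm, range_perm[] and main() are made up purely for illustration.

/* Toy model of the policy this commit introduces:
 * - before the mark_rodata_ro() transition, cache ranges stay RW+X;
 * - the transition flips every existing range to RO+X;
 * - ranges created afterwards are RO+X from the start.
 */
#include <stdbool.h>
#include <stdio.h>

enum perm { PERM_RWX, PERM_ROX };

static bool execmem_cache_rox;		/* mirrors the flag added in mm/execmem.c */
static enum perm range_perm[8];
static int nr_ranges;

static void execmem_cache_populate(void)
{
	/* Early allocations stay writable; late ones go straight to ROX. */
	range_perm[nr_ranges++] = execmem_cache_rox ? PERM_ROX : PERM_RWX;
}

static void execmem_cache_make_ro(void)
{
	execmem_cache_rox = true;
	for (int i = 0; i < nr_ranges; i++)	/* stands in for the maple-tree walks */
		range_perm[i] = PERM_ROX;
}

int main(void)
{
	execmem_cache_populate();		/* early boot: RW+X */
	execmem_cache_populate();		/* early boot: RW+X */
	execmem_cache_make_ro();		/* the mark_rodata_ro() point */
	execmem_cache_populate();		/* after the transition: RO+X */

	for (int i = 0; i < nr_ranges; i++)
		printf("range %d: %s\n", i,
		       range_perm[i] == PERM_ROX ? "RO+X" : "RW+X");
	return 0;
}

All three ranges end up reporting RO+X: the first two get flipped by execmem_cache_make_ro(), the third is created RO+X directly -- the same invariant the real patch establishes for the execmem cache.
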
3 changes: 3 additions & 0 deletions arch/x86/mm/init_32.c
@@ -30,6 +30,7 @@
 #include <linux/initrd.h>
 #include <linux/cpumask.h>
 #include <linux/gfp.h>
+#include <linux/execmem.h>
 
 #include <asm/asm.h>
 #include <asm/bios_ebda.h>
@@ -755,6 +756,8 @@ void mark_rodata_ro(void)
 	pr_info("Write protecting kernel text and read-only data: %luk\n",
 		size >> 10);
 
+	execmem_cache_make_ro();
+
 	kernel_set_to_readonly = 1;
 
 #ifdef CONFIG_CPA_DEBUG
3 changes: 3 additions & 0 deletions arch/x86/mm/init_64.c
@@ -34,6 +34,7 @@
 #include <linux/gfp.h>
 #include <linux/kcore.h>
 #include <linux/bootmem_info.h>
+#include <linux/execmem.h>
 
 #include <asm/processor.h>
 #include <asm/bios_ebda.h>
@@ -1391,6 +1392,8 @@ void mark_rodata_ro(void)
 	       (end - start) >> 10);
 	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
 
+	execmem_cache_make_ro();
+
 	kernel_set_to_readonly = 1;
 
 	/*
8 changes: 7 additions & 1 deletion include/linux/execmem.h
@@ -53,7 +53,7 @@
 	EXECMEM_ROX_CACHE = (1 << 1),
 };
 
-#ifdef CONFIG_ARCH_HAS_EXECMEM_ROX
+#if defined(CONFIG_ARCH_HAS_EXECMEM_ROX) && defined(CONFIG_EXECMEM)
 /**
  * execmem_fill_trapping_insns - set memory to contain instructions that
  * will trap
@@ -93,9 +93,15 @@ int execmem_make_temp_rw(void *ptr, size_t size);
  * Return: 0 on success or negative error code on failure.
  */
 int execmem_restore_rox(void *ptr, size_t size);
+
+/*
+ * Called from mark_readonly(), where the system transitions to ROX.
+ */
+void execmem_cache_make_ro(void);
 #else
 static inline int execmem_make_temp_rw(void *ptr, size_t size) { return 0; }
 static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; }
+static inline void execmem_cache_make_ro(void) { }
 #endif
 
 /**
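
One detail worth noting in the header hunk above: the real declaration of execmem_cache_make_ro() now depends on both CONFIG_ARCH_HAS_EXECMEM_ROX and CONFIG_EXECMEM, and every other configuration gets an empty static inline stub, so the new call sites in mark_rodata_ro() need no #ifdef of their own. Below is a minimal standalone sketch of that pattern, not kernel code: the CONFIG_* defines are just compile-time toggles for the demo rather than real Kconfig output, and the puts() message is invented.

#include <stdio.h>

/* #define CONFIG_ARCH_HAS_EXECMEM_ROX 1 */
/* #define CONFIG_EXECMEM 1 */

#if defined(CONFIG_ARCH_HAS_EXECMEM_ROX) && defined(CONFIG_EXECMEM)
void execmem_cache_make_ro(void)
{
	puts("making execmem cache read-only");
}
#else
static inline void execmem_cache_make_ro(void) { }	/* no-op when disabled */
#endif

int main(void)
{
	execmem_cache_make_ro();	/* the call site is identical either way */
	return 0;
}
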
40 changes: 37 additions & 3 deletions mm/execmem.c
@@ -254,6 +254,34 @@ static void *__execmem_cache_alloc(struct execmem_range *range, size_t size)
 	return ptr;
 }
 
+static bool execmem_cache_rox = false;
+
+void execmem_cache_make_ro(void)
+{
+	struct maple_tree *free_areas = &execmem_cache.free_areas;
+	struct maple_tree *busy_areas = &execmem_cache.busy_areas;
+	MA_STATE(mas_free, free_areas, 0, ULONG_MAX);
+	MA_STATE(mas_busy, busy_areas, 0, ULONG_MAX);
+	struct mutex *mutex = &execmem_cache.mutex;
+	void *area;
+
+	execmem_cache_rox = true;
+
+	mutex_lock(mutex);
+
+	mas_for_each(&mas_free, area, ULONG_MAX) {
+		unsigned long pages = mas_range_len(&mas_free) >> PAGE_SHIFT;
+		set_memory_ro(mas_free.index, pages);
+	}
+
+	mas_for_each(&mas_busy, area, ULONG_MAX) {
+		unsigned long pages = mas_range_len(&mas_busy) >> PAGE_SHIFT;
+		set_memory_ro(mas_busy.index, pages);
+	}
+
+	mutex_unlock(mutex);
+}
+
 static int execmem_cache_populate(struct execmem_range *range, size_t size)
 {
 	unsigned long vm_flags = VM_ALLOW_HUGE_VMAP;
@@ -274,9 +302,15 @@ static int execmem_cache_populate(struct execmem_range *range, size_t size)
 	/* fill memory with instructions that will trap */
 	execmem_fill_trapping_insns(p, alloc_size, /* writable = */ true);
 
-	err = set_memory_rox((unsigned long)p, vm->nr_pages);
-	if (err)
-		goto err_free_mem;
+	if (execmem_cache_rox) {
+		err = set_memory_rox((unsigned long)p, vm->nr_pages);
+		if (err)
+			goto err_free_mem;
+	} else {
+		err = set_memory_x((unsigned long)p, vm->nr_pages);
+		if (err)
+			goto err_free_mem;
+	}
 
 	err = execmem_cache_add(p, alloc_size);
 	if (err)