riscv: Cleanup KASAN_VMALLOC support
When the KASAN vmalloc region is populated, there is no userspace process
yet and the page table in use is swapper_pg_dir, so there is no need to
read SATP. We can then use the same scheme as the kasan_populate_p*d
functions to walk the page table, which harmonizes the code.

In addition, use set_pgd, which covers all unused page table levels,
unlike the p*d_populate functions; this makes the function work regardless
of the number of page table levels.

Signed-off-by: Alexandre Ghiti <alex@ghiti.fr>
Reviewed-by: Palmer Dabbelt <palmerdabbelt@google.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
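
For context on the first paragraph: pgd_offset_k() already resolves kernel
addresses through init_mm, whose pgd is swapper_pg_dir, so reading SATP buys
nothing here. A minimal sketch of the equivalence, based on the generic
kernel headers of that era (exact header locations are from memory):

	/* include/linux/pgtable.h: the kernel-space PGD lookup goes through
	 * init_mm, and init_mm.pgd is swapper_pg_dir (see mm/init-mm.c).
	 */
	#define pgd_offset_k(address)	pgd_offset(&init_mm, (address))

	/* So, while the kernel runs on swapper_pg_dir, the old SATP-based
	 * lookup:
	 *
	 *	pfn = csr_read(CSR_SATP) & SATP_PPN;
	 *	pgd_dir = (pgd_t *)pfn_to_virt(pfn) + pgd_index(vaddr);
	 *
	 * resolves to the same top-level entry as the portable:
	 *
	 *	pgd_dir = pgd_offset_k(vaddr);
	 */

As for the second paragraph: with folded levels (sv39 has only three), the
generic pgtable-nop4d.h/pgtable-nopud.h headers turn p4d_offset() and
pud_offset() into casts of the pgd pointer, so a single set_pgd() store is
the one real top-level write however many levels are configured.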
Alexandre Ghiti authored and Palmer Dabbelt committed Mar 30, 2021
1 parent f35bb4b commit 2da073c
Showing 1 changed file with 18 additions and 41 deletions.
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -11,18 +11,6 @@
 #include <asm/fixmap.h>
 #include <asm/pgalloc.h>
 
-static __init void *early_alloc(size_t size, int node)
-{
-	void *ptr = memblock_alloc_try_nid(size, size,
-		__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
-
-	if (!ptr)
-		panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
-			__func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
-
-	return ptr;
-}
-
 extern pgd_t early_pg_dir[PTRS_PER_PGD];
 asmlinkage void __init kasan_early_init(void)
 {
@@ -155,38 +143,27 @@ static void __init kasan_populate(void *start, void *end)
 	memset(start, KASAN_SHADOW_INIT, end - start);
 }
 
-void __init kasan_shallow_populate(void *start, void *end)
+static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long end)
 {
-	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
-	unsigned long vend = PAGE_ALIGN((unsigned long)end);
-	unsigned long pfn;
-	int index;
+	unsigned long next;
 	void *p;
-	pud_t *pud_dir, *pud_k;
-	pgd_t *pgd_dir, *pgd_k;
-	p4d_t *p4d_dir, *p4d_k;
-
-	while (vaddr < vend) {
-		index = pgd_index(vaddr);
-		pfn = csr_read(CSR_SATP) & SATP_PPN;
-		pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
-		pgd_k = init_mm.pgd + index;
-		pgd_dir = pgd_offset_k(vaddr);
-		set_pgd(pgd_dir, *pgd_k);
-
-		p4d_dir = p4d_offset(pgd_dir, vaddr);
-		p4d_k = p4d_offset(pgd_k, vaddr);
-
-		vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
-		pud_dir = pud_offset(p4d_dir, vaddr);
-		pud_k = pud_offset(p4d_k, vaddr);
-
-		if (pud_present(*pud_dir)) {
-			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
-			pud_populate(&init_mm, pud_dir, p);
+	pgd_t *pgd_k = pgd_offset_k(vaddr);
+
+	do {
+		next = pgd_addr_end(vaddr, end);
+		if (pgd_page_vaddr(*pgd_k) == (unsigned long)lm_alias(kasan_early_shadow_pmd)) {
+			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 		}
-		vaddr += PAGE_SIZE;
-	}
+	} while (pgd_k++, vaddr = next, vaddr != end);
+}
+
+static void __init kasan_shallow_populate(void *start, void *end)
+{
+	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+	unsigned long vend = PAGE_ALIGN((unsigned long)end);
+
+	kasan_shallow_populate_pgd(vaddr, vend);
 }
 
 void __init kasan_init(void)
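
For reference, the shallow-populate path is driven from kasan_init() for the
vmalloc shadow. A sketch of the call site as it plausibly looked around this
commit (CONFIG_KASAN_VMALLOC enabled; surrounding code abridged):

	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

The population is "shallow" because with CONFIG_KASAN_VMALLOC the shadow for
vmalloc mappings is allocated on demand; only the upper levels of the shadow
page tables need to exist up front.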
