Merge tag 'riscv-for-linux-5.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:
 "This contains a handful of RISC-V related fixes that I've collected
  and would like to target for 5.6-rc4:

   - A fix to set up the PMPs on boot, which allows the kernel to access
     memory on systems that don't set up permissive PMPs before getting
to Linux. This only affects machine-mode kernels, which currently
     means only NOMMU kernels.

   - A fix to avoid enabling supervisor-mode interrupts when running in
     machine-mode, also only for NOMMU kernels.

   - A pair of fixes to our KASAN support to avoid corrupting memory.

   - A gitignore fix.

  This boots on QEMU's virt board for me"

* tag 'riscv-for-linux-5.6-rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: adjust the indent
  riscv: allocate a complete page size for each page table
  riscv: Fix gitignore
  RISC-V: Don't enable all interrupts in trap_init()
  riscv: set pmp configuration if kernel is running in M-mode
Linus Torvalds committed Feb 25, 2020
2 parents d67f250 + 8458ca1 commit c5f8689
Showing 5 changed files with 53 additions and 24 deletions.
2 changes: 2 additions & 0 deletions arch/riscv/boot/.gitignore
@@ -1,2 +1,4 @@
 Image
 Image.gz
+loader
+loader.lds
12 changes: 12 additions & 0 deletions arch/riscv/include/asm/csr.h
@@ -72,6 +72,16 @@
 #define EXC_LOAD_PAGE_FAULT 13
 #define EXC_STORE_PAGE_FAULT 15
 
+/* PMP configuration */
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_A_TOR 0x08
+#define PMP_A_NA4 0x10
+#define PMP_A_NAPOT 0x18
+#define PMP_L 0x80
+
 /* symbolic CSR names: */
 #define CSR_CYCLE 0xc00
 #define CSR_TIME 0xc01
@@ -100,6 +110,8 @@
 #define CSR_MCAUSE 0x342
 #define CSR_MTVAL 0x343
 #define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPADDR0 0x3b0
 #define CSR_MHARTID 0xf14
 
 #ifdef CONFIG_RISCV_M_MODE
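A note on the new PMP_* masks: each PMP entry is controlled by one byte of a pmpcfg CSR, with R/W/X as single permission bits, PMP_A as a two-bit address-matching field (TOR, NA4 or NAPOT) and PMP_L as the lock bit. The stand-alone C sketch below is not part of the commit; it only recomputes the byte that head.S later writes into the low byte of pmpcfg0, and the printf-based demo and variable names are illustrative.

#include <stdio.h>

#define PMP_R       0x01
#define PMP_W       0x02
#define PMP_X       0x04
#define PMP_A       0x18   /* two-bit address-matching field, bits 3..4 */
#define PMP_A_TOR   0x08
#define PMP_A_NA4   0x10
#define PMP_A_NAPOT 0x18
#define PMP_L       0x80   /* lock bit */

int main(void)
{
	/* NAPOT matching plus read/write/execute, as programmed by head.S */
	unsigned char pmp0cfg = PMP_A_NAPOT | PMP_R | PMP_W | PMP_X;

	printf("pmp0cfg       = 0x%02x\n", pmp0cfg);          /* 0x1f */
	printf("matching mode = 0x%02x\n", pmp0cfg & PMP_A);  /* 0x18 = NAPOT */
	return 0;
}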
6 changes: 6 additions & 0 deletions arch/riscv/kernel/head.S
@@ -58,6 +58,12 @@ _start_kernel:
 /* Reset all registers except ra, a0, a1 */
 call reset_regs
 
+/* Setup a PMP to permit access to all of memory. */
+li a0, -1
+csrw CSR_PMPADDR0, a0
+li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
+csrw CSR_PMPCFG0, a0
+
 /*
 * The hartid in a0 is expected later on, and we have no firmware
 * to hand it to us.
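Why a single entry suffices: in NAPOT mode the region size is encoded in the trailing one bits of pmpaddr, so writing -1 (all ones) to pmpaddr0 selects a naturally aligned power-of-two region larger than the whole address space, meaning every access matches entry 0 and is permitted by the R/W/X bits above. Below is a hedged user-space sketch of that decoding; the helper name napot_size_log2 and the sample values are made up for illustration, and it ignores the fact that a real RV64 pmpaddr register implements fewer than 64 bits.

#include <stdio.h>
#include <stdint.h>

/* log2 of the size, in bytes, of the NAPOT region a pmpaddr value selects:
 * a run of n trailing one bits encodes a 2^(n+3)-byte naturally aligned region.
 */
static unsigned napot_size_log2(uint64_t pmpaddr)
{
	unsigned n = 0;

	while (pmpaddr & 1) {	/* count trailing ones */
		n++;
		pmpaddr >>= 1;
	}
	return n + 3;
}

int main(void)
{
	/* nine trailing ones -> 2^12 bytes, i.e. a 4 KiB region at address 0 */
	printf("log2(size) = %u\n", napot_size_log2(0x1ff));

	/* all ones, as written by the new head.S code: covers everything */
	printf("log2(size) = %u\n", napot_size_log2(UINT64_MAX));
	return 0;
}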
4 changes: 2 additions & 2 deletions arch/riscv/kernel/traps.c
@@ -156,6 +156,6 @@ void __init trap_init(void)
 csr_write(CSR_SCRATCH, 0);
 /* Set the exception vector address */
 csr_write(CSR_TVEC, &handle_exception);
-/* Enable all interrupts */
-csr_write(CSR_IE, -1);
+/* Enable interrupts */
+csr_write(CSR_IE, IE_SIE | IE_EIE);
 }
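For context, CSR_IE is the interrupt-enable CSR for whichever privilege mode the kernel runs in (sie normally, mie for an M-mode/NOMMU build), with one enable bit per interrupt source. Writing -1 set every bit, which on a machine-mode kernel also turned on the supervisor-level enable bits in mie; the new value sets only the software- and external-interrupt enables for the kernel's own mode and leaves the timer enable to the timer driver. The sketch below is not kernel code: it just prints the two masks using the supervisor-mode bit positions from the privileged spec (bit 1 software, bit 5 timer, bit 9 external), and the SIE_* names are illustrative, not the kernel's macros.

#include <stdio.h>

enum {
	SIE_SSIE = 1 << 1,	/* software interrupt enable */
	SIE_STIE = 1 << 5,	/* timer interrupt enable */
	SIE_SEIE = 1 << 9,	/* external interrupt enable */
};

int main(void)
{
	unsigned long old_ie = -1UL;			/* every enable bit */
	unsigned long new_ie = SIE_SSIE | SIE_SEIE;	/* software + external only */

	printf("old CSR_IE value: 0x%lx\n", old_ie);
	printf("new CSR_IE value: 0x%lx\n", new_ie);	/* 0x202 */
	return 0;
}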
53 changes: 31 additions & 22 deletions arch/riscv/mm/kasan_init.c
@@ -19,56 +19,64 @@ asmlinkage void __init kasan_early_init(void)
 for (i = 0; i < PTRS_PER_PTE; ++i)
 set_pte(kasan_early_shadow_pte + i,
 mk_pte(virt_to_page(kasan_early_shadow_page),
-PAGE_KERNEL));
+PAGE_KERNEL));
 
 for (i = 0; i < PTRS_PER_PMD; ++i)
 set_pmd(kasan_early_shadow_pmd + i,
-pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
-__pgprot(_PAGE_TABLE)));
+pfn_pmd(PFN_DOWN
+(__pa((uintptr_t) kasan_early_shadow_pte)),
+__pgprot(_PAGE_TABLE)));
 
 for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
 i += PGDIR_SIZE, ++pgd)
 set_pgd(pgd,
-pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
-__pgprot(_PAGE_TABLE)));
+pfn_pgd(PFN_DOWN
+(__pa(((uintptr_t) kasan_early_shadow_pmd))),
+__pgprot(_PAGE_TABLE)));
 
 /* init for swapper_pg_dir */
 pgd = pgd_offset_k(KASAN_SHADOW_START);
 
 for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
 i += PGDIR_SIZE, ++pgd)
 set_pgd(pgd,
-pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
-__pgprot(_PAGE_TABLE)));
+pfn_pgd(PFN_DOWN
+(__pa(((uintptr_t) kasan_early_shadow_pmd))),
+__pgprot(_PAGE_TABLE)));
 
 flush_tlb_all();
 }
 
 static void __init populate(void *start, void *end)
 {
-unsigned long i;
+unsigned long i, offset;
 unsigned long vaddr = (unsigned long)start & PAGE_MASK;
 unsigned long vend = PAGE_ALIGN((unsigned long)end);
 unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+unsigned long n_ptes =
+((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
 unsigned long n_pmds =
-(n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
-n_pages / PTRS_PER_PTE;
+((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
 
+pte_t *pte =
+memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+pmd_t *pmd =
+memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
 pgd_t *pgd = pgd_offset_k(vaddr);
-pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
-pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
 
 for (i = 0; i < n_pages; i++) {
 phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 
-set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
 }
 
-for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
-set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
+set_pmd(&pmd[i],
+pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
 __pgprot(_PAGE_TABLE)));
 
-for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
-set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
+set_pgd(&pgd[i],
+pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
 __pgprot(_PAGE_TABLE)));
 
 flush_tlb_all();
@@ -81,7 +89,8 @@ void __init kasan_init(void)
 unsigned long i;
 
 kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
-(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+(void *)kasan_mem_to_shadow((void *)
+VMALLOC_END));
 
 for_each_memblock(memory, reg) {
 void *start = (void *)__va(reg->base);
@@ -90,14 +99,14 @@ void __init kasan_init(void)
 if (start >= end)
 break;
 
-populate(kasan_mem_to_shadow(start),
-kasan_mem_to_shadow(end));
+populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
 };
 
 for (i = 0; i < PTRS_PER_PTE; i++)
 set_pte(&kasan_early_shadow_pte[i],
 mk_pte(virt_to_page(kasan_early_shadow_page),
-__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
+__pgprot(_PAGE_PRESENT | _PAGE_READ |
+_PAGE_ACCESSED)));
 
 memset(kasan_early_shadow_page, 0, PAGE_SIZE);
 init_task.kasan_depth = 0;
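On the populate() change: the old code sized its allocations as n_pages * sizeof(pte_t) and n_pmds * sizeof(pmd_t), but the loops that wire up the higher levels hand those arrays out in whole-table chunks of PTRS_PER_PTE or PTRS_PER_PMD entries, so the tail of the last chunk fell outside the allocation and could be clobbered by, or clobber, neighbouring memblock allocations. The new code rounds up to whole tables and allocates a full page per table. The stand-alone sketch below just reruns that arithmetic with assumed Sv39-style constants (4 KiB pages, 512 eight-byte entries per table) and a made-up n_pages; none of it is kernel code.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PTRS_PER_PTE	512UL
#define PTE_SIZE	8UL	/* sizeof(pte_t) on RV64 */

int main(void)
{
	unsigned long n_pages = 1000;	/* shadow pages to map, for example */

	/* Old sizing: one pte_t per mapped page -> 8000 bytes, even though the
	 * PMD entries will point at two full 4 KiB page tables. */
	unsigned long old_bytes = n_pages * PTE_SIZE;

	/* New sizing: round up to whole page tables, one full page each. */
	unsigned long n_ptes =
		((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
	unsigned long new_bytes = n_ptes * PTRS_PER_PTE * PTE_SIZE;

	printf("old: %lu bytes for PTEs\n", old_bytes);			/* 8000 */
	printf("new: %lu tables, %lu bytes\n", n_ptes, new_bytes);	/* 2, 8192 */
	return 0;
}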
