LoongArch: Add KASAN (Kernel Address Sanitizer) support
1/8 of the kernel address space is reserved for shadow memory. But on
LoongArch there are a lot of holes between the different segments, and the
valid address space (256TB available) is insufficient to map all of these
segments to KASAN shadow memory with the common formula provided by the
KASAN core, namely:

(addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET

So LoongArch uses an arch-specific mapping formula: the different segments
are mapped individually, and only a limited length of each of these
segments is mapped to shadow memory.
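
For illustration, here is a minimal C sketch (not part of this patch) contrasting
the two approaches. It reuses the segment and offset macros introduced by this
patch; the helper names generic_mem_to_shadow() and segmented_mem_to_shadow()
are made up for the example, and the real implementation is kasan_mem_to_shadow()
in arch/loongarch/include/asm/kasan.h further down in this diff:

  /* Generic KASAN: one linear mapping covers the whole address space. */
  static inline void *generic_mem_to_shadow(const void *addr)
  {
          return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
                          + KASAN_SHADOW_OFFSET);
  }

  /*
   * Per-segment sketch: pick a shadow base per segment, then scale only
   * the offset within that segment by the usual factor of 8.
   */
  static inline void *segmented_mem_to_shadow(const void *addr)
  {
          unsigned long maddr = (unsigned long)addr & XRANGE_SHADOW_MASK;
          unsigned long base;

          switch (((unsigned long)addr >> XRANGE_SHIFT) & 0xffff) {
          case XKPRANGE_CC_SEG: base = XKPRANGE_CC_SHADOW_OFFSET; break;
          case XKPRANGE_UC_SEG: base = XKPRANGE_UC_SHADOW_OFFSET; break;
          case XKVRANGE_VC_SEG: base = XKVRANGE_VC_SHADOW_OFFSET; break;
          default: return NULL; /* hole between segments, not tracked */
          }

          return (void *)(base + (maddr >> KASAN_SHADOW_SCALE_SHIFT));
  }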

At the early boot stage the whole shadow region is populated with just one
physical page (kasan_early_shadow_page). Later, this page is reused as a
read-only zero shadow for memory that KASAN does not currently track.
After the physical memory has been mapped, pages for the shadow memory are
allocated and mapped.

Functions like memset()/memcpy()/memmove() do a lot of memory accesses.
If a bad pointer is passed to one of these functions, it is important that
it be caught. The compiler's instrumentation cannot do this, since these
functions are written in assembly.

KASAN replaces these memory functions with manually instrumented variants.
The original functions are declared as weak symbols so that the strong
definitions in mm/kasan/kasan.c can replace them. The original functions
also have aliases with a '__' prefix in their names, so the
non-instrumented variants can be called if needed.
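
As a rough sketch of what that looks like (kasan_range_ok() below is a
hypothetical stand-in for the range check done by the generic KASAN code and is
not part of this patch), the instrumented strong definition wraps the raw '__'
variant roughly like this:

  void *memset(void *addr, int c, size_t len)
  {
          /* Report a KASAN bug if any byte of [addr, addr + len) is poisoned. */
          if (!kasan_range_ok(addr, len))
                  return NULL;

          return __memset(addr, c, len);  /* non-instrumented arch variant */
  }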

Signed-off-by: Qing Zhang <zhangqing@loongson.cn>
Signed-off-by: Huacai Chen <chenhuacai@loongson.cn>
Qing Zhang authored and Huacai Chen committed Sep 6, 2023
1 parent 9fbcc07 commit 5aa4ac6
Showing 17 changed files with 455 additions and 13 deletions.
4 changes: 2 additions & 2 deletions Documentation/dev-tools/kasan.rst
@@ -41,8 +41,8 @@ Support
Architectures
~~~~~~~~~~~~~

Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, and
xtensa, and the tag-based KASAN modes are supported only on arm64.
Generic KASAN is supported on x86_64, arm, arm64, powerpc, riscv, s390, xtensa,
and loongarch, and the tag-based KASAN modes are supported only on arm64.

Compilers
~~~~~~~~~
2 changes: 1 addition & 1 deletion Documentation/features/debug/KASAN/arch-support.txt
@@ -13,7 +13,7 @@
| csky: | TODO |
| hexagon: | TODO |
| ia64: | TODO |
| loongarch: | TODO |
| loongarch: | ok |
| m68k: | TODO |
| microblaze: | TODO |
| mips: | TODO |
2 changes: 1 addition & 1 deletion Documentation/translations/zh_CN/dev-tools/kasan.rst
@@ -42,7 +42,7 @@ KASAN有三种模式:
体系架构
~~~~~~~~

在x86_64、arm、arm64、powerpc、riscv、s390和xtensa上支持通用KASAN
在x86_64、arm、arm64、powerpc、riscv、s390、xtensa和loongarch上支持通用KASAN
而基于标签的KASAN模式只在arm64上支持。

编译器
7 changes: 7 additions & 0 deletions arch/loongarch/Kconfig
@@ -8,6 +8,7 @@ config LOONGARCH
select ACPI_PPTT if ACPI
select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
select ARCH_BINFMT_ELF_STATE
select ARCH_DISABLE_KASAN_INLINE
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
@@ -92,6 +93,7 @@ config LOONGARCH
select HAVE_ARCH_AUDITSYSCALL
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
select HAVE_ARCH_KFENCE
select HAVE_ARCH_KGDB if PERF_EVENTS
select HAVE_ARCH_MMAP_RND_BITS if MMU
@@ -669,6 +671,11 @@ config ARCH_MMAP_RND_BITS_MAX
config ARCH_SUPPORTS_UPROBES
def_bool y

config KASAN_SHADOW_OFFSET
hex
default 0x0
depends on KASAN

menu "Power management options"

config ARCH_SUSPEND_POSSIBLE
3 changes: 3 additions & 0 deletions arch/loongarch/Makefile
@@ -84,7 +84,10 @@ LDFLAGS_vmlinux += -static -pie --no-dynamic-linker -z notext
endif

cflags-y += $(call cc-option, -mno-check-zero-division)

ifndef CONFIG_KASAN
cflags-y += -fno-builtin-memcpy -fno-builtin-memmove -fno-builtin-memset
endif

load-y = 0x9000000000200000
bootvars-y = VMLINUX_LOAD_ADDRESS=$(load-y)
126 changes: 126 additions & 0 deletions arch/loongarch/include/asm/kasan.h
@@ -0,0 +1,126 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ASM_KASAN_H
#define __ASM_KASAN_H

#ifndef __ASSEMBLY__

#include <linux/linkage.h>
#include <linux/mmzone.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/pgtable.h>

#define __HAVE_ARCH_SHADOW_MAP

#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)

#define XRANGE_SHIFT (48)

/* Valid address length */
#define XRANGE_SHADOW_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3)
/* Used for taking out the valid address */
#define XRANGE_SHADOW_MASK GENMASK_ULL(XRANGE_SHADOW_SHIFT - 1, 0)
/* One segment whole address space size */
#define XRANGE_SIZE (XRANGE_SHADOW_MASK + 1)

/* 64-bit segment value. */
#define XKPRANGE_UC_SEG (0x8000)
#define XKPRANGE_CC_SEG (0x9000)
#define XKVRANGE_VC_SEG (0xffff)

/* Cached */
#define XKPRANGE_CC_START CACHE_BASE
#define XKPRANGE_CC_SIZE XRANGE_SIZE
#define XKPRANGE_CC_KASAN_OFFSET (0)
#define XKPRANGE_CC_SHADOW_SIZE (XKPRANGE_CC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKPRANGE_CC_SHADOW_END (XKPRANGE_CC_KASAN_OFFSET + XKPRANGE_CC_SHADOW_SIZE)

/* UnCached */
#define XKPRANGE_UC_START UNCACHE_BASE
#define XKPRANGE_UC_SIZE XRANGE_SIZE
#define XKPRANGE_UC_KASAN_OFFSET XKPRANGE_CC_SHADOW_END
#define XKPRANGE_UC_SHADOW_SIZE (XKPRANGE_UC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKPRANGE_UC_SHADOW_END (XKPRANGE_UC_KASAN_OFFSET + XKPRANGE_UC_SHADOW_SIZE)

/* VMALLOC (Cached or UnCached) */
#define XKVRANGE_VC_START MODULES_VADDR
#define XKVRANGE_VC_SIZE round_up(KFENCE_AREA_END - MODULES_VADDR + 1, PGDIR_SIZE)
#define XKVRANGE_VC_KASAN_OFFSET XKPRANGE_UC_SHADOW_END
#define XKVRANGE_VC_SHADOW_SIZE (XKVRANGE_VC_SIZE >> KASAN_SHADOW_SCALE_SHIFT)
#define XKVRANGE_VC_SHADOW_END (XKVRANGE_VC_KASAN_OFFSET + XKVRANGE_VC_SHADOW_SIZE)

/* KAsan shadow memory start right after vmalloc. */
#define KASAN_SHADOW_START round_up(KFENCE_AREA_END, PGDIR_SIZE)
#define KASAN_SHADOW_SIZE (XKVRANGE_VC_SHADOW_END - XKPRANGE_CC_KASAN_OFFSET)
#define KASAN_SHADOW_END round_up(KASAN_SHADOW_START + KASAN_SHADOW_SIZE, PGDIR_SIZE)

#define XKPRANGE_CC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_CC_KASAN_OFFSET)
#define XKPRANGE_UC_SHADOW_OFFSET (KASAN_SHADOW_START + XKPRANGE_UC_KASAN_OFFSET)
#define XKVRANGE_VC_SHADOW_OFFSET (KASAN_SHADOW_START + XKVRANGE_VC_KASAN_OFFSET)

extern bool kasan_early_stage;
extern unsigned char kasan_early_shadow_page[PAGE_SIZE];

#define kasan_arch_is_ready kasan_arch_is_ready
static __always_inline bool kasan_arch_is_ready(void)
{
return !kasan_early_stage;
}

static inline void *kasan_mem_to_shadow(const void *addr)
{
if (!kasan_arch_is_ready()) {
return (void *)(kasan_early_shadow_page);
} else {
unsigned long maddr = (unsigned long)addr;
unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
unsigned long offset = 0;

maddr &= XRANGE_SHADOW_MASK;
switch (xrange) {
case XKPRANGE_CC_SEG:
offset = XKPRANGE_CC_SHADOW_OFFSET;
break;
case XKPRANGE_UC_SEG:
offset = XKPRANGE_UC_SHADOW_OFFSET;
break;
case XKVRANGE_VC_SEG:
offset = XKVRANGE_VC_SHADOW_OFFSET;
break;
default:
WARN_ON(1);
return NULL;
}

return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
}
}

static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
unsigned long addr = (unsigned long)shadow_addr;

if (unlikely(addr > KASAN_SHADOW_END) ||
unlikely(addr < KASAN_SHADOW_START)) {
WARN_ON(1);
return NULL;
}

if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
else {
WARN_ON(1);
return NULL;
}
}

void kasan_init(void);
asmlinkage void kasan_early_init(void);

#endif
#endif
7 changes: 7 additions & 0 deletions arch/loongarch/include/asm/pgtable.h
@@ -89,9 +89,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#endif

#define VMALLOC_START MODULES_END

#ifndef CONFIG_KASAN
#define VMALLOC_END \
(vm_map_base + \
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits)) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#else
#define VMALLOC_END \
(vm_map_base + \
min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, (1UL << cpu_vabits) / 2) - PMD_SIZE - VMEMMAP_SIZE - KFENCE_AREA_SIZE)
#endif

#define vmemmap ((struct page *)((VMALLOC_END + PMD_SIZE) & PMD_MASK))
#define VMEMMAP_END ((unsigned long)vmemmap + VMEMMAP_SIZE - 1)
20 changes: 20 additions & 0 deletions arch/loongarch/include/asm/string.h
@@ -7,11 +7,31 @@

#define __HAVE_ARCH_MEMSET
extern void *memset(void *__s, int __c, size_t __count);
extern void *__memset(void *__s, int __c, size_t __count);

#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *__to, __const__ void *__from, size_t __n);
extern void *__memcpy(void *__to, __const__ void *__from, size_t __n);

#define __HAVE_ARCH_MEMMOVE
extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);

#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)

/*
* For files that are not instrumented (e.g. mm/slub.c) we
* should use not instrumented version of mem* functions.
*/

#define memset(s, c, n) __memset(s, c, n)
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)

#ifndef __NO_FORTIFY
#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
#endif

#endif

#endif /* _ASM_STRING_H */
6 changes: 6 additions & 0 deletions arch/loongarch/kernel/Makefile
@@ -34,6 +34,12 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE)
endif

KASAN_SANITIZE_efi.o := n
KASAN_SANITIZE_cpu-probe.o := n
KASAN_SANITIZE_traps.o := n
KASAN_SANITIZE_smp.o := n
KASAN_SANITIZE_vdso.o := n

obj-$(CONFIG_MODULES) += module.o module-sections.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o

4 changes: 4 additions & 0 deletions arch/loongarch/kernel/head.S
@@ -104,6 +104,10 @@ SYM_CODE_START(kernel_entry) # kernel entry point

#endif /* CONFIG_RELOCATABLE */

#ifdef CONFIG_KASAN
bl kasan_early_init
#endif

bl start_kernel
ASM_BUG()

4 changes: 4 additions & 0 deletions arch/loongarch/kernel/setup.c
@@ -626,4 +626,8 @@ void __init setup_arch(char **cmdline_p)
#endif

paging_init();

#ifdef CONFIG_KASAN
kasan_init();
#endif
}
8 changes: 7 additions & 1 deletion arch/loongarch/lib/memcpy.S
@@ -10,16 +10,22 @@
#include <asm/cpu.h>
#include <asm/regdef.h>

.section .noinstr.text, "ax"

SYM_FUNC_START(memcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __memcpy_generic", \
"b __memcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memcpy)
_ASM_NOKPROBE(memcpy)
SYM_FUNC_ALIAS(__memcpy, memcpy)

EXPORT_SYMBOL(memcpy)
EXPORT_SYMBOL(__memcpy)

_ASM_NOKPROBE(memcpy)
_ASM_NOKPROBE(__memcpy)

/*
* void *__memcpy_generic(void *dst, const void *src, size_t n)
20 changes: 13 additions & 7 deletions arch/loongarch/lib/memmove.S
@@ -10,23 +10,29 @@
#include <asm/cpu.h>
#include <asm/regdef.h>

.section .noinstr.text, "ax"

SYM_FUNC_START(memmove)
blt a0, a1, memcpy /* dst < src, memcpy */
blt a1, a0, rmemcpy /* src < dst, rmemcpy */
jr ra /* dst == src, return */
blt a0, a1, __memcpy /* dst < src, memcpy */
blt a1, a0, __rmemcpy /* src < dst, rmemcpy */
jr ra /* dst == src, return */
SYM_FUNC_END(memmove)
_ASM_NOKPROBE(memmove)
SYM_FUNC_ALIAS(__memmove, memmove)

EXPORT_SYMBOL(memmove)
EXPORT_SYMBOL(__memmove)

_ASM_NOKPROBE(memmove)
_ASM_NOKPROBE(__memmove)

SYM_FUNC_START(rmemcpy)
SYM_FUNC_START(__rmemcpy)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __rmemcpy_generic", \
"b __rmemcpy_fast", CPU_FEATURE_UAL
SYM_FUNC_END(rmemcpy)
_ASM_NOKPROBE(rmemcpy)
SYM_FUNC_END(__rmemcpy)
_ASM_NOKPROBE(__rmemcpy)

/*
* void *__rmemcpy_generic(void *dst, const void *src, size_t n)
8 changes: 7 additions & 1 deletion arch/loongarch/lib/memset.S
@@ -16,16 +16,22 @@
bstrins.d \r0, \r0, 63, 32
.endm

.section .noinstr.text, "ax"

SYM_FUNC_START(memset)
/*
* Some CPUs support hardware unaligned access
*/
ALTERNATIVE "b __memset_generic", \
"b __memset_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memset)
_ASM_NOKPROBE(memset)
SYM_FUNC_ALIAS(__memset, memset)

EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)

_ASM_NOKPROBE(memset)
_ASM_NOKPROBE(__memset)

/*
* void *__memset_generic(void *s, int c, size_t n)
3 changes: 3 additions & 0 deletions arch/loongarch/mm/Makefile
@@ -7,3 +7,6 @@ obj-y += init.o cache.o tlb.o tlbex.o extable.o \
fault.o ioremap.o maccess.o mmap.o pgtable.o page.o

obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
obj-$(CONFIG_KASAN) += kasan_init.o

KASAN_SANITIZE_kasan_init.o := n