x86, relocs: Move ELF relocation handling to C
Moves the relocation handling into C, after decompression. This requires
that the decompressed size is passed to the decompression routine as
well so that relocations can be found. Only kernels that need relocation
support will use the code (currently just x86_32), but this is laying
the groundwork for 64-bit to use it in support of KASLR.

Based on work by Neill Clift and Michael Davidson.

Signed-off-by: Kees Cook <keescook@chromium.org>
Link: http://lkml.kernel.org/r/20130708161517.GA4832@www.outflux.net
Acked-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Kees Cook authored and H. Peter Anvin committed Aug 8, 2013
1 parent c095ba7 commit a021506
Showing 8 changed files with 97 additions and 40 deletions.
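In short, the decompressor entry point gains a sixth argument carrying the decompressed size, so the C code can locate the relocation table appended to the end of the image. For orientation before the diffs, the updated prototype as it appears in the misc.c hunk below:

asmlinkage void decompress_kernel(void *rmode, memptr heap,
				  unsigned char *input_data,
				  unsigned long input_len,
				  unsigned char *output,
				  unsigned long output_len);	/* new: decompressed length */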
arch/x86/Kconfig: 6 additions & 2 deletions
@@ -1716,9 +1716,10 @@ config X86_NEED_RELOCS
depends on X86_32 && RELOCATABLE

config PHYSICAL_ALIGN
hex "Alignment value to which kernel should be aligned" if X86_32
hex "Alignment value to which kernel should be aligned"
default "0x1000000"
range 0x2000 0x1000000
range 0x2000 0x1000000 if X86_32
range 0x200000 0x1000000 if X86_64
---help---
This value puts the alignment restrictions on physical address
where kernel is loaded and run from. Kernel is compiled for an
@@ -1736,6 +1737,9 @@ config PHYSICAL_ALIGN
end result is that kernel runs from a physical address meeting
above alignment restrictions.

On 32-bit this value must be a multiple of 0x2000. On 64-bit
this value must be a multiple of 0x200000.

Don't change this unless you know what you are doing.

config HOTPLUG_CPU
arch/x86/Makefile: 4 additions & 4 deletions
@@ -16,6 +16,10 @@ endif
# e.g.: obj-y += foo_$(BITS).o
export BITS

ifdef CONFIG_X86_NEED_RELOCS
LDFLAGS_vmlinux := --emit-relocs
endif

ifeq ($(CONFIG_X86_32),y)
BITS := 32
UTS_MACHINE := i386
@@ -25,10 +29,6 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_AFLAGS += $(biarch)
KBUILD_CFLAGS += $(biarch)

ifdef CONFIG_RELOCATABLE
LDFLAGS_vmlinux := --emit-relocs
endif

KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return

# Never want PIC in a 32-bit kernel, prevent breakage with GCC built
arch/x86/boot/compressed/head_32.S: 3 additions & 28 deletions
@@ -181,8 +181,9 @@ relocated:
/*
* Do the decompression, and jump to the new kernel..
*/
leal z_extract_offset_negative(%ebx), %ebp
/* push arguments for decompress_kernel: */
pushl $z_output_len /* decompressed length */
leal z_extract_offset_negative(%ebx), %ebp
pushl %ebp /* output address */
pushl $z_input_len /* input_len */
leal input_data(%ebx), %eax
@@ -191,33 +192,7 @@ relocated:
pushl %eax /* heap area */
pushl %esi /* real mode pointer */
call decompress_kernel
addl $20, %esp

#if CONFIG_RELOCATABLE
/*
* Find the address of the relocations.
*/
leal z_output_len(%ebp), %edi

/*
* Calculate the delta between where vmlinux was compiled to run
* and where it was actually loaded.
*/
movl %ebp, %ebx
subl $LOAD_PHYSICAL_ADDR, %ebx
jz 2f /* Nothing to be done if loaded at compiled addr. */
/*
* Process relocations.
*/

1: subl $4, %edi
movl (%edi), %ecx
testl %ecx, %ecx
jz 2f
addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
jmp 1b
2:
#endif
addl $24, %esp

/*
* Jump to the decompressed kernel.
arch/x86/boot/compressed/head_64.S: 1 addition & 0 deletions
@@ -338,6 +338,7 @@ relocated:
leaq input_data(%rip), %rdx /* input_data */
movl $z_input_len, %ecx /* input_len */
movq %rbp, %r8 /* output target address */
movq $z_output_len, %r9 /* decompressed length */
call decompress_kernel
popq %rsi

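The single head_64.S addition loads z_output_len into %r9 because, under the x86-64 SysV calling convention used by the 64-bit decompressor, the six arguments of decompress_kernel() travel in %rdi, %rsi, %rdx, %rcx, %r8 and %r9, in that order. As an annotation only (the first two registers are set up by earlier, unchanged instructions):

/*
 * decompress_kernel(rmode, heap, input_data, input_len, output, output_len)
 *                   %rdi   %rsi  %rdx        %rcx       %r8     %r9
 */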
arch/x86/boot/compressed/misc.c: 76 additions & 1 deletion
@@ -271,6 +271,79 @@ static void error(char *x)
asm("hlt");
}

#if CONFIG_X86_NEED_RELOCS
static void handle_relocations(void *output, unsigned long output_len)
{
int *reloc;
unsigned long delta, map, ptr;
unsigned long min_addr = (unsigned long)output;
unsigned long max_addr = min_addr + output_len;

/*
* Calculate the delta between where vmlinux was linked to load
* and where it was actually loaded.
*/
delta = min_addr - LOAD_PHYSICAL_ADDR;
if (!delta) {
debug_putstr("No relocation needed... ");
return;
}
debug_putstr("Performing relocations... ");

/*
* The kernel contains a table of relocation addresses. Those
* addresses have the final load address of the kernel in virtual
* memory. We are currently working in the self map. So we need to
* create an adjustment for kernel memory addresses to the self map.
* This will involve subtracting out the base address of the kernel.
*/
map = delta - __START_KERNEL_map;

/*
* Process relocations: 32 bit relocations first then 64 bit after.
* Two sets of binary relocations are added to the end of the kernel
* before compression. Each relocation table entry is the kernel
* address of the location which needs to be updated stored as a
* 32-bit value which is sign extended to 64 bits.
*
* Format is:
*
* kernel bits...
* 0 - zero terminator for 64 bit relocations
* 64 bit relocation repeated
* 0 - zero terminator for 32 bit relocations
* 32 bit relocation repeated
*
* So we work backwards from the end of the decompressed image.
*/
for (reloc = output + output_len - sizeof(*reloc); *reloc; reloc--) {
int extended = *reloc;
extended += map;

ptr = (unsigned long)extended;
if (ptr < min_addr || ptr > max_addr)
error("32-bit relocation outside of kernel!\n");

*(uint32_t *)ptr += delta;
}
#ifdef CONFIG_X86_64
for (reloc--; *reloc; reloc--) {
long extended = *reloc;
extended += map;

ptr = (unsigned long)extended;
if (ptr < min_addr || ptr > max_addr)
error("64-bit relocation outside of kernel!\n");

*(uint64_t *)ptr += delta;
}
#endif
}
#else
static inline void handle_relocations(void *output, unsigned long output_len)
{ }
#endif

static void parse_elf(void *output)
{
#ifdef CONFIG_X86_64
@@ -325,7 +398,8 @@ static void parse_elf(void *output)
asmlinkage void decompress_kernel(void *rmode, memptr heap,
unsigned char *input_data,
unsigned long input_len,
unsigned char *output)
unsigned char *output,
unsigned long output_len)
{
real_mode = rmode;

@@ -365,6 +439,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
debug_putstr("\nDecompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
parse_elf(output);
handle_relocations(output, output_len);
debug_putstr("done.\nBooting the kernel.\n");
return;
}
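The table layout described in the handle_relocations() comment above (32-bit entries at the very end of the image, a zero terminator, then the 64-bit group with its own terminator) can be illustrated with a small standalone sketch. This is a toy with made-up table values, not kernel code:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* [64-bit terminator, 64-bit relocs..., 32-bit terminator, 32-bit relocs...] */
	int32_t table[] = { 0, -100, -200, 0, 10, 20, 30 };	/* hypothetical values */
	size_t n = sizeof(table) / sizeof(table[0]);
	int32_t *reloc = &table[n - 1];

	/* Walk backwards from the end: first the 32-bit group... */
	for (; *reloc; reloc--)
		printf("32-bit entry: %d\n", (int)*reloc);
	/* ...then step over its terminator and walk the 64-bit group. */
	for (reloc--; *reloc; reloc--)
		printf("64-bit entry (stored as a sign-extended 32-bit value): %d\n", (int)*reloc);
	return 0;
}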
arch/x86/include/asm/page_32_types.h: 2 additions & 0 deletions
@@ -15,6 +15,8 @@
*/
#define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)

#define __START_KERNEL_map __PAGE_OFFSET

#define THREAD_SIZE_ORDER 1
#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)

arch/x86/include/asm/page_64_types.h: 0 additions & 5 deletions
@@ -32,11 +32,6 @@
*/
#define __PAGE_OFFSET _AC(0xffff880000000000, UL)

#define __PHYSICAL_START ((CONFIG_PHYSICAL_START + \
(CONFIG_PHYSICAL_ALIGN - 1)) & \
~(CONFIG_PHYSICAL_ALIGN - 1))

#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
#define __START_KERNEL_map _AC(0xffffffff80000000, UL)

/* See Documentation/x86/x86_64/mm.txt for a description of the memory map. */
arch/x86/include/asm/page_types.h: 5 additions & 0 deletions
@@ -33,6 +33,11 @@
(((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)

#define __PHYSICAL_START ALIGN(CONFIG_PHYSICAL_START, \
CONFIG_PHYSICAL_ALIGN)

#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)

#ifdef CONFIG_X86_64
#include <asm/page_64_types.h>
#else
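For reference, the new __PHYSICAL_START simply rounds CONFIG_PHYSICAL_START up to the next multiple of CONFIG_PHYSICAL_ALIGN, which is what the open-coded expression removed from page_64_types.h did. A sketch with hypothetical values (the kernel's ALIGN() rounds up to a power-of-two boundary):

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* stand-in for the kernel's ALIGN() */

/* e.g. with CONFIG_PHYSICAL_ALIGN = 0x200000:			*/
/*	ALIGN_UP(0x1000000, 0x200000) == 0x1000000  (already aligned)	*/
/*	ALIGN_UP(0x1080000, 0x200000) == 0x1200000  (rounded up)	*/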
