Skip to content

Commit

Permalink
x86, boot: straighten out ranges to copy/zero in compressed/head*.S
Browse files Browse the repository at this point in the history
On both 32 and 64 bits, we copy all the way up to the end of bss,
except that on 64 bits there is a hack to avoid copying on top of the
page tables.  There is no point in copying bss at all, especially
since we are just about to zero it all anyway.

To clean up and unify the handling, we now do:

  - copy from startup_32 to _bss.
  - zero from _bss to _ebss.
  - the _ebss symbol is aligned to an 8-byte boundary.
  - the page tables are moved to a separate section.

Use _bss as the copy endpoint since _edata may be misaligned.

[ Impact: cleanup, trivial performance improvement ]

Signed-off-by: H. Peter Anvin <hpa@zytor.com>
  • Loading branch information
H. Peter Anvin committed May 9, 2009
1 parent b40d68d commit 5b11f1c
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 16 deletions.
8 changes: 4 additions & 4 deletions arch/x86/boot/compressed/head_32.S
Original file line number Diff line number Diff line change
Expand Up @@ -93,9 +93,9 @@ ENTRY(startup_32)
* where decompression in place becomes safe.
*/
pushl %esi
-leal _ebss(%ebp), %esi
-leal _ebss(%ebx), %edi
-movl $(_ebss - startup_32), %ecx
+leal _bss(%ebp), %esi
+leal _bss(%ebx), %edi
+movl $(_bss - startup_32), %ecx
std
rep movsb
cld
Expand Down Expand Up @@ -125,7 +125,7 @@ relocated:
* Clear BSS
*/
xorl %eax, %eax
-leal _edata(%ebx), %edi
+leal _bss(%ebx), %edi
leal _ebss(%ebx), %ecx
subl %edi, %ecx
cld
Expand Down
18 changes: 13 additions & 5 deletions arch/x86/boot/compressed/head_64.S
Original file line number Diff line number Diff line change
Expand Up @@ -253,9 +253,9 @@ ENTRY(startup_64)
* Copy the compressed kernel to the end of our buffer
* where decompression in place becomes safe.
*/
-leaq _end_before_pgt(%rip), %r8
-leaq _end_before_pgt(%rbx), %r9
-movq $_end_before_pgt /* - $startup_32 */, %rcx
+leaq _bss(%rip), %r8
+leaq _bss(%rbx), %r9
+movq $_bss /* - $startup_32 */, %rcx
1: subq $8, %r8
subq $8, %r9
movq 0(%r8), %rax
Expand All @@ -276,8 +276,8 @@ relocated:
* Clear BSS
*/
xorq %rax, %rax
-leaq _edata(%rbx), %rdi
-leaq _end_before_pgt(%rbx), %rcx
+leaq _bss(%rbx), %rdi
+leaq _ebss(%rbx), %rcx
subq %rdi, %rcx
cld
rep stosb
Expand Down Expand Up @@ -329,3 +329,11 @@ boot_heap:
boot_stack:
.fill BOOT_STACK_SIZE, 1, 0
boot_stack_end:

/*
 * Space for the page tables: 6 pages, page-aligned.  Kept in a
 * dedicated allocated NOBITS section (".pgtable") instead of .bss so
 * that the "Clear BSS" loop (which zeroes _bss.._ebss) does not wipe
 * them, and the startup copy no longer needs a special case to step
 * around them.
 * NOTE(review): the 6-page count matches the old linker-script
 * reservation (4096 * 6) — confirm against how head_64.S initializes
 * these tables.
 */
.section ".pgtable","a",@nobits
.balign 4096
pgtable:
.fill 6*4096, 1, 0
19 changes: 12 additions & 7 deletions arch/x86/boot/compressed/vmlinux.lds.S
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,8 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)

#undef i386

#include <asm/page_types.h>

#ifdef CONFIG_X86_64
OUTPUT_ARCH(i386:x86-64)
ENTRY(startup_64)
Expand Down Expand Up @@ -48,13 +50,16 @@ SECTIONS
*(.bss)
*(.bss.*)
*(COMMON)
-#ifdef CONFIG_X86_64
-. = ALIGN(8);
-_end_before_pgt = . ;
-. = ALIGN(4096);
-pgtable = . ;
-. = . + 4096 * 6;
-#endif
+. = ALIGN(8); /* For convenience during zeroing */
 _ebss = .;
}
+#ifdef CONFIG_X86_64
+. = ALIGN(PAGE_SIZE);
+.pgtable : {
+_pgtable = . ;
+*(.pgtable)
+_epgtable = . ;
+}
+#endif
_end = .;
}

0 comments on commit 5b11f1c

Please sign in to comment.