Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86-32: Make sure the stack is set up before we use it
  x86, mtrr: Avoid MTRR reprogramming on BP during boot on UP platforms
  x86, nx: Don't force pages RW when setting NX bits
Linus Torvalds committed Feb 6, 2011
2 parents 585a7c6 + 11d4c3f commit 07675f4
Showing 6 changed files with 26 additions and 33 deletions.
5 changes: 1 addition & 4 deletions arch/x86/include/asm/smp.h
@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);

/* Static state in head.S used to set up a CPU */
extern struct {
void *sp;
unsigned short ss;
} stack_start;
extern unsigned long stack_start; /* Initial stack pointer address */

struct smp_ops {
void (*smp_prepare_boot_cpu)(void);
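
With the declaration above, a boot-time stack is now published as a single address rather than an {sp, ss} pair. A minimal sketch of the new pattern, where everything except stack_start itself is a hypothetical name (the real assignments appear in the sleep.c and smpboot.c hunks below):

/* Sketch only: example_stack and prepare_example_stack() are hypothetical. */
#include <asm/smp.h>			/* extern unsigned long stack_start; */

static char example_stack[4096];

static void prepare_example_stack(void)
{
	/* x86 stacks grow down, so publish the top of the area. */
	stack_start = (unsigned long)example_stack + sizeof(example_stack);
}
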
2 changes: 1 addition & 1 deletion arch/x86/kernel/acpi/sleep.c
@@ -100,7 +100,7 @@ int acpi_save_state_mem(void)
#else /* CONFIG_64BIT */
header->trampoline_segment = setup_trampoline() >> 4;
#ifdef CONFIG_SMP
stack_start.sp = temp_stack + sizeof(temp_stack);
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
(unsigned long)get_cpu_gdt_table(smp_processor_id());
initial_gs = per_cpu_offset(smp_processor_id());
10 changes: 9 additions & 1 deletion arch/x86/kernel/cpu/mtrr/main.c
@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
}

/*
* MTRR initialization for all AP's
* Delayed MTRR initialization for all AP's
*/
void mtrr_aps_init(void)
{
if (!use_intel())
return;

/*
* Check if someone has requested the delay of AP MTRR initialization,
* by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
* then we are done.
*/
if (!mtrr_aps_delayed_init)
return;

set_mtrr(~0U, 0, 0, 0);
mtrr_aps_delayed_init = false;
}
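
The new early return matters because set_mtrr_aps_delayed_init() and mtrr_aps_init() are meant to be used as a pair around AP bring-up. A simplified sketch of that pairing, assuming hypothetical names for everything other than the two mtrr_* calls:

/* Sketch only: bring_up_secondary_cpus() and the *_sketch() wrappers are hypothetical. */
void smp_boot_sketch(void)
{
	set_mtrr_aps_delayed_init();	/* APs will defer their MTRR setup */
	bring_up_secondary_cpus();
	mtrr_aps_init();		/* replay MTRRs once, on all CPUs */
}

void up_boot_sketch(void)
{
	/*
	 * On a UP platform nobody calls set_mtrr_aps_delayed_init(), so
	 * with the added check mtrr_aps_init() now returns early instead
	 * of pointlessly reprogramming the BP's MTRRs via set_mtrr().
	 */
	mtrr_aps_init();
}
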
30 changes: 13 additions & 17 deletions arch/x86/kernel/head_32.S
@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
*/
__HEAD
ENTRY(startup_32)
movl pa(stack_start),%ecx

/* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */
testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
movl %eax,%ss
2:
leal -__PAGE_OFFSET(%ecx),%esp

/*
* Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
* _brk_end is set up to point to the first "safe" location.
* Mappings are created both at virtual address 0 (identity mapping)
* and PAGE_OFFSET for up to _end.
*
* Note that the stack is not yet set up!
*/
#ifdef CONFIG_X86_PAE

@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
movl pa(stack_start),%ecx
movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp
#endif /* CONFIG_SMP */
default_entry:

@@ -347,8 +352,8 @@ default_entry:
movl %eax,%cr0 /* ..and set paging (PG) bit */
ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
1:
/* Set up the stack pointer */
lss stack_start,%esp
/* Shift the stack pointer to a virtual address */
addl $__PAGE_OFFSET, %esp

/*
* Initialize eflags. Some BIOS's leave bits like NT set. This would
@@ -360,9 +365,7 @@ default_entry:

#ifdef CONFIG_SMP
cmpb $0, ready
jz 1f /* Initial CPU cleans BSS */
jmp checkCPUtype
1:
jnz checkCPUtype
#endif /* CONFIG_SMP */

/*
@@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP

cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder
#ifdef CONFIG_SMP
movb ready, %cl
movb $1, ready
cmpb $0,%cl # the first CPU calls start_kernel
je 1f
movl (stack_start), %esp
1:
#endif /* CONFIG_SMP */
jmp *(initial_code)

/*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
#endif

.data
.balign 4
ENTRY(stack_start)
.long init_thread_union+THREAD_SIZE
.long __BOOT_DS

ready: .byte 0

early_recursion_flag:
.long 0

ready: .byte 0

int_msg:
.asciz "Unknown interrupt or fault at: %p %p %p\n"

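
The %esp handling above relies on the fixed x86-32 relationship between a kernel virtual address and its physical counterpart before and after paging is enabled. A small C sketch of that arithmetic, illustrative only, with EXAMPLE_PAGE_OFFSET standing in for the kernel's __PAGE_OFFSET:

/* Sketch only: these helpers mirror the leal/addl arithmetic in head_32.S. */
#define EXAMPLE_PAGE_OFFSET	0xC0000000UL	/* typical 32-bit __PAGE_OFFSET */

static unsigned long stack_phys(unsigned long virt_top)
{
	/* Before paging is on: leal -__PAGE_OFFSET(%ecx),%esp */
	return virt_top - EXAMPLE_PAGE_OFFSET;
}

static unsigned long stack_virt(unsigned long phys_top)
{
	/* After paging is on: addl $__PAGE_OFFSET,%esp */
	return phys_top + EXAMPLE_PAGE_OFFSET;
}
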
4 changes: 2 additions & 2 deletions arch/x86/kernel/smpboot.c
@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
* target processor state.
*/
startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
(unsigned long)stack_start.sp);
stack_start);

/*
* Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
#endif
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
stack_start.sp = (void *) c_idle.idle->thread.sp;
stack_start = c_idle.idle->thread.sp;

/* start_ip had better be page-aligned! */
start_ip = setup_trampoline();
8 changes: 0 additions & 8 deletions arch/x86/mm/pageattr.c
@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
unsigned long pfn)
{
pgprot_t forbidden = __pgprot(0);
pgprot_t required = __pgprot(0);

/*
* The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
__pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW;
/*
* .data and .bss should always be writable.
*/
if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
pgprot_val(required) |= _PAGE_RW;

#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
#endif

prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
prot = __pgprot(pgprot_val(prot) | pgprot_val(required));

return prot;
}
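
With the 'required' logic removed, static_protections() can no longer add _PAGE_RW behind a caller's back for .data/.bss addresses; only the 'forbidden' mask is applied. A hedged sketch of the visible effect, where addr and numpages are placeholders and set_memory_nx() is the existing API the commit subject refers to:

/* Sketch only: addr/numpages stand in for a real kernel mapping. */
static void nx_example(unsigned long addr, int numpages)
{
	/*
	 * Previously, if addr fell inside .data or .bss, static_protections()
	 * OR'ed _PAGE_RW back into the new protection, so a request that only
	 * meant to set NX could also force the pages writable.  After this
	 * change the caller's RW setting is left untouched.
	 */
	set_memory_nx(addr, numpages);
}
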
