Merge branch 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86-32: Make sure the stack is set up before we use it
  x86, mtrr: Avoid MTRR reprogramming on BP during boot on UP platforms
  x86, nx: Don't force pages RW when setting NX bits
Linus Torvalds 2011-02-06 12:03:10 -08:00
commit 07675f484b
6 changed files with 26 additions and 33 deletions


@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
/* Static state in head.S used to set up a CPU */
extern struct {
void *sp;
unsigned short ss;
} stack_start;
extern unsigned long stack_start; /* Initial stack pointer address */
struct smp_ops {
void (*smp_prepare_boot_cpu)(void);


@@ -100,7 +100,7 @@ int acpi_save_state_mem(void)
#else /* CONFIG_64BIT */
header->trampoline_segment = setup_trampoline() >> 4;
#ifdef CONFIG_SMP
stack_start.sp = temp_stack + sizeof(temp_stack);
stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
early_gdt_descr.address =
(unsigned long)get_cpu_gdt_table(smp_processor_id());
initial_gs = per_cpu_offset(smp_processor_id());


@@ -793,13 +793,21 @@ void set_mtrr_aps_delayed_init(void)
}
/*
* MTRR initialization for all AP's
* Delayed MTRR initialization for all AP's
*/
void mtrr_aps_init(void)
{
if (!use_intel())
return;
/*
* Check if someone has requested the delay of AP MTRR initialization,
* by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
* then we are done.
*/
if (!mtrr_aps_delayed_init)
return;
set_mtrr(~0U, 0, 0, 0);
mtrr_aps_delayed_init = false;
}
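
A standalone sketch (user-space C that only models the flag; not kernel code, and the function bodies are stand-ins for the real mtrr routines) of why the new check fixes the UP case: the delay is only ever requested from the SMP bring-up path, so on a uniprocessor boot mtrr_aps_init() now returns without touching the boot CPU's MTRRs.

#include <stdbool.h>
#include <stdio.h>

/* Model of the flag logic; names mirror the kernel's but this is not kernel code. */
static bool mtrr_aps_delayed_init;

static void set_mtrr_aps_delayed_init(void)	/* requested from SMP bring-up */
{
	mtrr_aps_delayed_init = true;
}

static void mtrr_aps_init(void)			/* called once the APs are up */
{
	if (!mtrr_aps_delayed_init) {		/* the new check: UP boot path */
		puts("no delayed init requested -- leave the BP's MTRRs alone");
		return;
	}
	puts("rendezvous: program MTRRs on all APs");
	mtrr_aps_delayed_init = false;
}

int main(void)
{
	/* UP platform: SMP bring-up never requests the delay */
	mtrr_aps_init();

	/* SMP platform: delay requested before the APs boot, honoured afterwards */
	set_mtrr_aps_delayed_init();
	mtrr_aps_init();
	return 0;
}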


@@ -85,6 +85,8 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
*/
__HEAD
ENTRY(startup_32)
movl pa(stack_start),%ecx
/* test KEEP_SEGMENTS flag to see if the bootloader is asking
us to not reload segments */
testb $(1<<6), BP_loadflags(%esi)
@@ -99,7 +101,9 @@ ENTRY(startup_32)
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
movl %eax,%ss
2:
leal -__PAGE_OFFSET(%ecx),%esp
/*
* Clear BSS first so that there are no surprises...
@@ -145,8 +149,6 @@ ENTRY(startup_32)
* _brk_end is set up to point to the first "safe" location.
* Mappings are created both at virtual address 0 (identity mapping)
* and PAGE_OFFSET for up to _end.
*
* Note that the stack is not yet set up!
*/
#ifdef CONFIG_X86_PAE
@@ -282,6 +284,9 @@ ENTRY(startup_32_smp)
movl %eax,%es
movl %eax,%fs
movl %eax,%gs
movl pa(stack_start),%ecx
movl %eax,%ss
leal -__PAGE_OFFSET(%ecx),%esp
#endif /* CONFIG_SMP */
default_entry:
@@ -347,8 +352,8 @@ default_entry:
movl %eax,%cr0 /* ..and set paging (PG) bit */
ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
1:
/* Set up the stack pointer */
lss stack_start,%esp
/* Shift the stack pointer to a virtual address */
addl $__PAGE_OFFSET, %esp
/*
* Initialize eflags. Some BIOS's leave bits like NT set. This would
@@ -360,9 +365,7 @@ default_entry:
#ifdef CONFIG_SMP
cmpb $0, ready
jz 1f /* Initial CPU cleans BSS */
jmp checkCPUtype
1:
jnz checkCPUtype
#endif /* CONFIG_SMP */
/*
@@ -470,14 +473,7 @@ is386: movl $2,%ecx # set MP
cld # gcc2 wants the direction flag cleared at all times
pushl $0 # fake return address for unwinder
#ifdef CONFIG_SMP
movb ready, %cl
movb $1, ready
cmpb $0,%cl # the first CPU calls start_kernel
je 1f
movl (stack_start), %esp
1:
#endif /* CONFIG_SMP */
jmp *(initial_code)
/*
@@ -670,15 +666,15 @@ ENTRY(initial_page_table)
#endif
.data
.balign 4
ENTRY(stack_start)
.long init_thread_union+THREAD_SIZE
.long __BOOT_DS
ready: .byte 0
early_recursion_flag:
.long 0
ready: .byte 0
int_msg:
.asciz "Unknown interrupt or fault at: %p %p %p\n"
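
A standalone sketch (user-space C, not kernel code; the __PAGE_OFFSET value is the assumed 32-bit default and the stack_start sample value is hypothetical) of the address arithmetic behind the two %esp fix-ups above: stack_start holds a virtual address, so before paging is enabled the boot code subtracts __PAGE_OFFSET to get a usable physical stack pointer, and once paging is on it adds __PAGE_OFFSET back.

#include <stdio.h>

#define __PAGE_OFFSET 0xC0000000UL	/* assumed default 32-bit kernel virtual base */

int main(void)
{
	unsigned long stack_start = 0xC15FE000UL;	/* hypothetical init_thread_union + THREAD_SIZE */

	/* leal -__PAGE_OFFSET(%ecx),%esp : physical stack before paging */
	unsigned long esp_before_paging = stack_start - __PAGE_OFFSET;

	/* addl $__PAGE_OFFSET,%esp : back to the virtual address after paging */
	unsigned long esp_after_paging = esp_before_paging + __PAGE_OFFSET;

	printf("physical %%esp before paging: %#lx\n", esp_before_paging);
	printf("virtual  %%esp after paging:  %#lx\n", esp_after_paging);
	return 0;
}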


@@ -638,7 +638,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
* target processor state.
*/
startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
(unsigned long)stack_start.sp);
stack_start);
/*
* Run STARTUP IPI loop.
@@ -785,7 +785,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
#endif
early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
initial_code = (unsigned long)start_secondary;
stack_start.sp = (void *) c_idle.idle->thread.sp;
stack_start = c_idle.idle->thread.sp;
/* start_ip had better be page-aligned! */
start_ip = setup_trampoline();


@@ -256,7 +256,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
unsigned long pfn)
{
pgprot_t forbidden = __pgprot(0);
pgprot_t required = __pgprot(0);
/*
* The BIOS area between 640k and 1Mb needs to be executable for
@@ -282,12 +281,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
__pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW;
/*
* .data and .bss should always be writable.
*/
if (within(address, (unsigned long)_sdata, (unsigned long)_edata) ||
within(address, (unsigned long)__bss_start, (unsigned long)__bss_stop))
pgprot_val(required) |= _PAGE_RW;
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
/*
@@ -327,7 +320,6 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
#endif
prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
prot = __pgprot(pgprot_val(prot) | pgprot_val(required));
return prot;
}
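
For illustration (standalone C with made-up flag values and helper names, not the kernel's real pgprot encoding, and the NX-only request is a simplified scenario): with `required` gone, static_protections() can only clear bits from the caller's requested protection, so a caller that merely wants to set NX no longer has _PAGE_RW forced back on for .data/.bss addresses.

#include <stdio.h>

#define _PAGE_RW 0x002UL	/* illustrative bit positions only */
#define _PAGE_NX 0x100UL

/* Old behaviour: strip forbidden bits, then force the required ones back on. */
static unsigned long prot_old(unsigned long prot, unsigned long forbidden,
			      unsigned long required)
{
	return (prot & ~forbidden) | required;
}

/* New behaviour: only ever restrict what the caller asked for. */
static unsigned long prot_new(unsigned long prot, unsigned long forbidden)
{
	return prot & ~forbidden;
}

int main(void)
{
	unsigned long prot = _PAGE_NX;	/* caller only wants to set NX */

	/* address inside .data/.bss: the removed code set required = _PAGE_RW */
	printf("old: %#lx (RW forced on behind the caller's back)\n",
	       prot_old(prot, 0, _PAGE_RW));
	printf("new: %#lx (caller's protection left alone)\n",
	       prot_new(prot, 0));
	return 0;
}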