From f6c2e3330d3fdd5474bc3756da46fca889a30e33 Mon Sep 17 00:00:00 2001
From: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Date: Sat, 5 Nov 2005 17:25:53 +0100
Subject: [PATCH] x86_64: Unmap NULL during early bootup

We should zap the low mappings as soon as possible, so that we can
catch kernel bugs more effectively. Previously, early boot had NULL
mapped and didn't trap on NULL references.

This patch introduces boot_level4_pgt, which will always have the low
identity addresses mapped. During boot, all the processors will use
this as their level4 pgt. On the BP, we switch to init_level4_pgt as
soon as we enter C code and zap the low mappings as soon as we are
done using the identity-mapped low addresses. On the APs, we zap the
low mappings as soon as we jump to C code.

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
---
 arch/x86_64/kernel/head.S    | 37 +++++++++++++++++++++++--------------
 arch/x86_64/kernel/head64.c  |  8 ++++++++
 arch/x86_64/kernel/mpparse.c |  2 +-
 arch/x86_64/kernel/setup.c   |  2 ++
 arch/x86_64/kernel/setup64.c |  2 +-
 arch/x86_64/kernel/smpboot.c |  3 ---
 arch/x86_64/mm/init.c        | 28 +++++++++++++++++-----------
 7 files changed, 52 insertions(+), 30 deletions(-)

diff --git a/arch/x86_64/kernel/head.S b/arch/x86_64/kernel/head.S
index b92e5f4..1529096 100644
--- a/arch/x86_64/kernel/head.S
+++ b/arch/x86_64/kernel/head.S
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <linux/threads.h>
+#include <linux/init.h>
 #include <asm/desc.h>
 #include <asm/segment.h>
 #include <asm/page.h>
@@ -70,7 +71,7 @@ startup_32:
 	movl	%eax, %cr4
 
 	/* Setup early boot stage 4 level pagetables */
-	movl	$(init_level4_pgt - __START_KERNEL_map), %eax
+	movl	$(boot_level4_pgt - __START_KERNEL_map), %eax
 	movl	%eax, %cr3
 
 	/* Setup EFER (Extended Feature Enable Register) */
@@ -113,7 +114,7 @@ startup_64:
 	movq	%rax, %cr4
 
 	/* Setup early boot stage 4 level pagetables. */
-	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
+	movq	$(boot_level4_pgt - __START_KERNEL_map), %rax
 	movq	%rax, %cr3
 
 	/* Check if nx is implemented */
@@ -240,20 +241,10 @@ ljumpvector:
 ENTRY(stext)
 ENTRY(_stext)
 
-	/*
-	 * This default setting generates an ident mapping at address 0x100000
-	 * and a mapping for the kernel that precisely maps virtual address
-	 * 0xffffffff80000000 to physical address 0x000000. (always using
-	 * 2Mbyte large pages provided by PAE mode)
-	 */
.org 0x1000
ENTRY(init_level4_pgt)
-	.quad	0x0000000000002007 + __PHYSICAL_START	/* -> level3_ident_pgt */
-	.fill	255,8,0
-	.quad	0x000000000000a007 + __PHYSICAL_START
-	.fill	254,8,0
-	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-	.quad	0x0000000000003007 + __PHYSICAL_START	/* -> level3_kernel_pgt */
+	/* This gets initialized in x86_64_start_kernel */
+	.fill	512,8,0
 
.org 0x2000
ENTRY(level3_ident_pgt)
@@ -350,6 +341,24 @@ ENTRY(wakeup_level4_pgt)
 	.quad	0x0000000000003007 + __PHYSICAL_START	/* -> level3_kernel_pgt */
 #endif
 
+#ifndef CONFIG_HOTPLUG_CPU
+	__INITDATA
+#endif
+	/*
+	 * This default setting generates an ident mapping at address 0x100000
+	 * and a mapping for the kernel that precisely maps virtual address
+	 * 0xffffffff80000000 to physical address 0x000000. (always using
+	 * 2Mbyte large pages provided by PAE mode)
+	 */
+	.align PAGE_SIZE
+ENTRY(boot_level4_pgt)
+	.quad	0x0000000000002007 + __PHYSICAL_START	/* -> level3_ident_pgt */
+	.fill	255,8,0
+	.quad	0x000000000000a007 + __PHYSICAL_START
+	.fill	254,8,0
+	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
+	.quad	0x0000000000003007 + __PHYSICAL_START	/* -> level3_kernel_pgt */
+
 	.data
 
 	.align 16
diff --git a/arch/x86_64/kernel/head64.c b/arch/x86_64/kernel/head64.c
index cf6ab14..e0f12d4 100644
--- a/arch/x86_64/kernel/head64.c
+++ b/arch/x86_64/kernel/head64.c
@@ -19,6 +19,7 @@
 #include <asm/bootsetup.h>
 #include <asm/setup.h>
 #include <asm/desc.h>
+#include <asm/pgtable.h>
 
 /* Don't add a printk in there. printk relies on the PDA which is not
    initialized yet. */
@@ -86,6 +87,13 @@ void __init x86_64_start_kernel(char * real_mode_data)
 		set_intr_gate(i, early_idt_handler);
 	asm volatile("lidt %0" :: "m" (idt_descr));
 	clear_bss();
+
+	/*
+	 * switch to init_level4_pgt from boot_level4_pgt
+	 */
+	memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD*sizeof(pgd_t));
+	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+
 	pda_init(0);
 	copy_bootdata(real_mode_data);
 #ifdef CONFIG_SMP
diff --git a/arch/x86_64/kernel/mpparse.c b/arch/x86_64/kernel/mpparse.c
index 8f6958e..4a581d1 100644
--- a/arch/x86_64/kernel/mpparse.c
+++ b/arch/x86_64/kernel/mpparse.c
@@ -549,7 +549,7 @@ void __init get_smp_config (void)
 		 * Read the physical hardware table.  Anything here will
 		 * override the defaults.
 		 */
-		if (!smp_read_mpc((void *)(unsigned long)mpf->mpf_physptr)) {
+		if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
 			smp_found_config = 0;
 			printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
 			printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
diff --git a/arch/x86_64/kernel/setup.c b/arch/x86_64/kernel/setup.c
index 99cfa75..f76f374 100644
--- a/arch/x86_64/kernel/setup.c
+++ b/arch/x86_64/kernel/setup.c
@@ -571,6 +571,8 @@ void __init setup_arch(char **cmdline_p)
 
 	init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
 
+	zap_low_mappings(0);
+
 #ifdef CONFIG_ACPI
 	/*
 	 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
diff --git a/arch/x86_64/kernel/setup64.c b/arch/x86_64/kernel/setup64.c
index 7919089..06dc354 100644
--- a/arch/x86_64/kernel/setup64.c
+++ b/arch/x86_64/kernel/setup64.c
@@ -141,7 +141,6 @@ void pda_init(int cpu)
 			panic("cannot allocate irqstack for cpu %d", cpu);
 	}
 
-	asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
 	pda->irqstackptr += IRQSTACKSIZE-64;
 }
 
@@ -197,6 +196,7 @@ void __cpuinit cpu_init (void)
 	/* CPU 0 is initialised in head64.c */
 	if (cpu != 0) {
 		pda_init(cpu);
+		zap_low_mappings(cpu);
 	} else
 		estacks = boot_exception_stacks;
 
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index 658a81b..3393fc0 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -1063,9 +1063,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
  */
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-#ifndef CONFIG_HOTPLUG_CPU
-	zap_low_mappings();
-#endif
 	smp_cleanup_boot();
 
 #ifdef CONFIG_X86_IO_APIC
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index 2b1d6c3..be483a1 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -312,12 +312,19 @@ void __init init_memory_mapping(unsigned long start, unsigned long end)
 
 extern struct x8664_pda cpu_pda[NR_CPUS];
 
-/* Assumes all CPUs still execute in init_mm */
-void zap_low_mappings(void)
+void __cpuinit zap_low_mappings(int cpu)
 {
-	pgd_t *pgd = pgd_offset_k(0UL);
-	pgd_clear(pgd);
-	flush_tlb_all();
+	if (cpu == 0) {
+		pgd_t *pgd = pgd_offset_k(0UL);
+		pgd_clear(pgd);
+	} else {
+		/*
+		 * For AP's, zap the low identity mappings by changing the cr3
+		 * to init_level4_pgt and doing local flush tlb all
+		 */
+		asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
+	}
+	__flush_tlb_all();
 }
 
 /* Compute zone sizes for the DMA and DMA32 zones in a node. */
@@ -474,14 +481,13 @@ void __init mem_init(void)
 		datasize >> 10,
 		initsize >> 10);
 
+#ifdef CONFIG_SMP
 	/*
-	 * Subtle. SMP is doing its boot stuff late (because it has to
-	 * fork idle threads) - but it also needs low mappings for the
-	 * protected-mode entry to work. We zap these entries only after
-	 * the WP-bit has been tested.
+	 * Sync boot_level4_pgt mappings with the init_level4_pgt
+	 * except for the low identity mappings which are already zapped
+	 * in init_level4_pgt. This sync-up is essential for AP's bringup
 	 */
-#ifndef CONFIG_SMP
-	zap_low_mappings();
+	memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
 #endif
 }
-- 
cgit v1.1
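The two memcpy() calls in the patch decide which level4 entries each page
table ends up holding: head64.c copies boot_level4_pgt into init_level4_pgt
wholesale, zap_low_mappings(0) later clears only entry 0 (the low identity
slot), and mem_init() syncs entries 1..511 back into boot_level4_pgt so its
identity mapping survives for AP bringup. The standalone userspace sketch
below models just that index arithmetic; it is not kernel code, and pgd_t,
the entry values, and main() are simplified stand-ins for the kernel's
definitions.

/* Sketch of the pgd copy/sync arithmetic; build with: cc -o pgt pgt.c */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PTRS_PER_PGD 512
typedef uint64_t pgd_t;		/* stand-in for one level4 entry */

static pgd_t boot_level4_pgt[PTRS_PER_PGD];
static pgd_t init_level4_pgt[PTRS_PER_PGD];

int main(void)
{
	boot_level4_pgt[0]   = 0x2007;	/* low identity mapping, as set up in head.S */
	boot_level4_pgt[511] = 0x3007;	/* kernel mapping */

	/* BP in x86_64_start_kernel(): take over the boot tables wholesale */
	memcpy(init_level4_pgt, boot_level4_pgt, PTRS_PER_PGD * sizeof(pgd_t));

	/* zap_low_mappings(0): clear only the low identity slot */
	init_level4_pgt[0] = 0;

	/*
	 * mem_init(): the "+1" skips entry 0, so boot_level4_pgt keeps its
	 * identity mapping while picking up every other kernel mapping.
	 */
	memcpy(boot_level4_pgt + 1, init_level4_pgt + 1,
	       (PTRS_PER_PGD - 1) * sizeof(pgd_t));

	/* prints boot[0]=0x2007 init[0]=0: NULL stays mapped only for APs */
	printf("boot[0]=%#llx init[0]=%#llx\n",
	       (unsigned long long)boot_level4_pgt[0],
	       (unsigned long long)init_level4_pgt[0]);
	return 0;
}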