forked from luck/tmp_suning_uos_patched
656be92f9a
This is a patch to load 64-bit modules to CKSEG0 so that they can be compiled with the -msym32 option.  This makes each module ~10% smaller.

* introduce MODULE_START and MODULE_END
* custom module_alloc()
* PGD for modules
* change XTLB refill handler synthesizer
* enable -msym32 for modules again
  (revert ca78b1a5c6a6e70e052d3ea253828e49b5d07c8a)

The new XTLB refill handler looks like this:

80000080 dmfc0   k0,C0_BADVADDR
80000084 bltz    k0,800000e4        # goto l_module_alloc
80000088 lui     k1,0x8046          # %high(pgd_current)
8000008c ld      k1,24600(k1)       # %low(pgd_current)
80000090 dsrl    k0,k0,0x1b         # l_vmalloc_done:
80000094 andi    k0,k0,0x1ff8
80000098 daddu   k1,k1,k0
8000009c dmfc0   k0,C0_BADVADDR
800000a0 ld      k1,0(k1)
800000a4 dsrl    k0,k0,0x12
800000a8 andi    k0,k0,0xff8
800000ac daddu   k1,k1,k0
800000b0 dmfc0   k0,C0_XCONTEXT
800000b4 ld      k1,0(k1)
800000b8 andi    k0,k0,0xff0
800000bc daddu   k1,k1,k0
800000c0 ld      k0,0(k1)
800000c4 ld      k1,8(k1)
800000c8 dsrl    k0,k0,0x6
800000cc mtc0    k0,C0_ENTRYLO0
800000d0 dsrl    k1,k1,0x6
800000d4 mtc0    k1,C0_ENTRYLO1
800000d8 nop
800000dc tlbwr
800000e0 eret
800000e4 dsll    k1,k0,0x2          # l_module_alloc:
800000e8 bgez    k1,80000008        # goto l_vmalloc
800000ec lui     k1,0xc000
800000f0 dsubu   k0,k0,k1
800000f4 lui     k1,0x8046          # %high(module_pg_dir)
800000f8 beq     zero,zero,80000000
800000fc nop
80000000 beq     zero,zero,80000090 # goto l_vmalloc_done
80000004 daddiu  k1,k1,0x4000
80000008 dsll32  k1,k1,0x0          # l_vmalloc:
8000000c dsubu   k0,k0,k1
80000010 beq     zero,zero,80000090 # goto l_vmalloc_done
80000014 lui     k1,0x8046          # %high(swapper_pg_dir)

Signed-off-by: Atsushi Nemoto <anemo@mba.ocn.ne.jp>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
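The patch body for the "custom module_alloc()" item is not shown on this page. As a minimal sketch of the approach, the routine below confines module images to the MODULE_START..MODULE_END window instead of the generic vmalloc range; it assumes the __get_vm_area()/__vmalloc_area() helpers available in kernels of that era and is an illustration of the technique, not necessarily the patch's exact code.

/*
 * Sketch only: allocate module images inside MODULE_START..MODULE_END
 * so that symbols stay 32-bit representable and modules can be built
 * with -msym32.  Falls back to plain vmalloc() when no module window
 * is defined.  Assumes 2.6.19-era __get_vm_area()/__vmalloc_area().
 */
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/pgtable.h>

void *module_alloc(unsigned long size)
{
#ifdef MODULE_START
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size)
                return NULL;

        /* Reserve a virtual range restricted to the module window. */
        area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
        if (!area)
                return NULL;

        /* Back the range with pages, as vmalloc() would. */
        return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
#else
        if (size == 0)
                return NULL;
        return vmalloc(size);
#endif
}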
73 lines
1.7 KiB
C
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000 by Silicon Graphics
 * Copyright (C) 2003 by Ralf Baechle
 */
#include <linux/init.h>
#include <linux/mm.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>

void pgd_init(unsigned long page)
{
        unsigned long *p, *end;

        p = (unsigned long *) page;
        end = p + PTRS_PER_PGD;

        /* Point every PGD slot at the empty PMD, eight entries per pass. */
        while (p < end) {
                p[0] = (unsigned long) invalid_pmd_table;
                p[1] = (unsigned long) invalid_pmd_table;
                p[2] = (unsigned long) invalid_pmd_table;
                p[3] = (unsigned long) invalid_pmd_table;
                p[4] = (unsigned long) invalid_pmd_table;
                p[5] = (unsigned long) invalid_pmd_table;
                p[6] = (unsigned long) invalid_pmd_table;
                p[7] = (unsigned long) invalid_pmd_table;
                p += 8;
        }
}

void pmd_init(unsigned long addr, unsigned long pagetable)
{
        unsigned long *p, *end;

        p = (unsigned long *) addr;
        end = p + PTRS_PER_PMD;

        /* Point every PMD slot at the given page table, eight entries per pass. */
        while (p < end) {
                p[0] = (unsigned long)pagetable;
                p[1] = (unsigned long)pagetable;
                p[2] = (unsigned long)pagetable;
                p[3] = (unsigned long)pagetable;
                p[4] = (unsigned long)pagetable;
                p[5] = (unsigned long)pagetable;
                p[6] = (unsigned long)pagetable;
                p[7] = (unsigned long)pagetable;
                p += 8;
        }
}

void __init pagetable_init(void)
{
        unsigned long vaddr;
        pgd_t *pgd_base;

        /* Initialize the entire pgd. */
        pgd_init((unsigned long)swapper_pg_dir);
#ifdef MODULE_START
        pgd_init((unsigned long)module_pg_dir);
#endif
        pmd_init((unsigned long)invalid_pmd_table, (unsigned long)invalid_pte_table);

        pgd_base = swapper_pg_dir;
        /*
         * Fixed mappings:
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        fixrange_init(vaddr, 0, pgd_base);
}
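To relate this file to the refill handler in the commit message: pagetable_init() now prepares module_pg_dir alongside swapper_pg_dir, and the synthesized handler picks one of three directories based on the faulting address. The C sketch below is illustrative only, not code from the patch; it assumes pgd_current[] is declared as in the MIPS headers of that era, and the explicit MODULE_START/MODULE_END comparison stands in for the bit tests the real handler performs on k0/k1.

#include <linux/smp.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>    /* pgd_current[] (assumed location) */

/*
 * Illustrative only: which page directory the XTLB refill handler walks
 * for a given BadVAddr.  Positive (XUSEG) addresses use the per-CPU user
 * PGD, the CKSSEG module window uses module_pg_dir, and everything else
 * (XKSEG / vmalloc space) uses swapper_pg_dir.
 */
static pgd_t *refill_pgd(unsigned long badvaddr)
{
        if ((long)badvaddr >= 0)                        /* XUSEG: user space */
                return (pgd_t *)pgd_current[smp_processor_id()];
#ifdef MODULE_START
        if (badvaddr >= MODULE_START && badvaddr < MODULE_END)
                return module_pg_dir;                   /* module window */
#endif
        return swapper_pg_dir;                          /* XKSEG / vmalloc */
}

Because the module window sits in CKSSEG rather than the generic vmalloc region, module text and data stay within a range whose addresses fit the -msym32 model, which is where the ~10% size saving comes from.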