kernel_optimize_test/arch/xtensa/mm/mmu.c
Max Filippov e85e335f8f xtensa: add MMU v3 support
MMUv3 comes out of reset with identity vaddr -> paddr mapping in the TLB
way 6:

Way 6 (512 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0x00000000  0x00000000  0x01  0x03 RWX Bypass
        0x20000000  0x20000000  0x01  0x03 RWX Bypass
        0x40000000  0x40000000  0x01  0x03 RWX Bypass
        0x60000000  0x60000000  0x01  0x03 RWX Bypass
        0x80000000  0x80000000  0x01  0x03 RWX Bypass
        0xa0000000  0xa0000000  0x01  0x03 RWX Bypass
        0xc0000000  0xc0000000  0x01  0x03 RWX Bypass
        0xe0000000  0xe0000000  0x01  0x03 RWX Bypass

This patch adds remapping code at the reset vector or at the kernel
_start (depending on CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX) that
reconfigures MMUv3 as MMUv2:

Way 5 (128 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0xd0000000  0x00000000  0x01  0x07 RWX WB
        0xd8000000  0x00000000  0x01  0x03 RWX Bypass
Way 6 (256 MB)
        Vaddr       Paddr       ASID  Attr RWX Cache
        ----------  ----------  ----  ---- --- -------
        0xe0000000  0xf0000000  0x01  0x07 RWX WB
        0xf0000000  0xf0000000  0x01  0x03 RWX Bypass
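
The fixed kernel mapping implied by the Way 5/6 layout above boils down to
simple address arithmetic. The helper below is a sketch for illustration
only: the name kseg_virt_to_phys and the hard-coded window boundaries come
from the table above, not from the kernel sources.

#include <stdint.h>

/*
 * Illustration of the static Way 5/6 windows shown above:
 * 0xd0000000 (cached) and 0xd8000000 (bypass) both map physical
 * 0x00000000; 0xe0000000 (cached) maps physical 0xf0000000;
 * 0xf0000000 is identity-mapped (bypass).
 */
static uint32_t kseg_virt_to_phys(uint32_t vaddr)
{
	if (vaddr >= 0xd0000000 && vaddr < 0xd8000000)
		return vaddr - 0xd0000000;	/* Way 5, writeback */
	if (vaddr >= 0xd8000000 && vaddr < 0xe0000000)
		return vaddr - 0xd8000000;	/* Way 5, bypass    */
	if (vaddr >= 0xe0000000 && vaddr < 0xf0000000)
		return vaddr + 0x10000000;	/* Way 6, writeback */
	if (vaddr >= 0xf0000000)
		return vaddr;			/* Way 6, bypass    */
	return (uint32_t)-1;			/* not a fixed map  */
}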

Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Chris Zankel <chris@zankel.net>
2013-05-09 01:07:09 -07:00

/*
 * xtensa mmu stuff
 *
 * Extracted from init.c
 */
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>

void __init paging_init(void)
{
	memset(swapper_pg_dir, 0, PAGE_SIZE);
}

/*
 * Flush the MMU and reset the associated registers to default values.
 */
void __init init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensures that valid values exist in the registers.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array. For nonexistent PGSZID<w> fields,
	 * zero is the best value to write. Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
	flush_tlb_all();

	/* Set the RASID register to a known value. */
	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set the PTEVADDR special register to the start of the page
	 * table, which is in kernel-mappable space (i.e. not
	 * statically mapped). This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(PGTABLE_START);
}
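
PTEVADDR anchors the page table in virtual address space so that a TLB
refill can locate a PTE with plain address arithmetic. The sketch below is
a conceptual illustration only, assuming 4 KiB pages, 4-byte PTEs and a
4 MiB page-table window; the helper name pte_vaddr_for and the masks are
assumptions of this sketch, not kernel code.

#include <stdint.h>

#define EXAMPLE_PAGE_SHIFT	12	/* assumed 4 KiB pages */

/*
 * Conceptual sketch: view the page table as a flat array of 4-byte
 * PTEs living in the 4 MiB virtual window anchored by PTEVADDR, and
 * index it with the virtual page number of the faulting address.
 */
static uint32_t pte_vaddr_for(uint32_t ptevaddr, uint32_t fault_vaddr)
{
	uint32_t vpn = fault_vaddr >> EXAMPLE_PAGE_SHIFT;	/* virtual page number */

	return (ptevaddr & 0xffc00000) + vpn * sizeof(uint32_t);
}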

struct kmem_cache *pgtable_cache __read_mostly;

static void pgd_ctor(void *addr)
{
	pte_t *ptep = (pte_t *)addr;
	int i;

	/* Clear all 1024 four-byte entries of the new pgd page. */
	for (i = 0; i < 1024; i++, ptep++)
		pte_clear(NULL, 0, ptep);
}

void __init pgtable_cache_init(void)
{
	pgtable_cache = kmem_cache_create("pgd",
			PAGE_SIZE, PAGE_SIZE,
			SLAB_HWCACHE_ALIGN,
			pgd_ctor);
}