Commit 576d2f2525: Now that we have all the static mappings from iotable_init() located in the vmalloc area, it is trivial to optimize ioremap by reusing those static mappings when the requested physical area fits within one of them; this works generically for all platforms.

Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Tested-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Kevin Hilman <khilman@ti.com>
Tested-by: Jamie Iles <jamie@jamieiles.com>
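The optimization the message describes can be pictured with a rough sketch. The helper name reuse_static_mapping is hypothetical, the loop uses the global vmlist that existed in kernels of that era, and the locking and guard-page handling the real code needs are omitted; the flags and macros it relies on are the ones declared in the header below.

static void __iomem *reuse_static_mapping(phys_addr_t paddr, size_t size,
					  unsigned int mtype)
{
	struct vm_struct *vm;	/* from <linux/vmalloc.h> */

	for (vm = vmlist; vm; vm = vm->next) {
		/* only consider permanent mappings made by iotable_init() */
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		/* the memory type (attributes) must match the request */
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		/* the requested physical range must fit inside the mapping */
		if (paddr < vm->phys_addr ||
		    paddr + size > vm->phys_addr + vm->size)
			continue;
		/* reuse it: return the matching offset into the static mapping */
		return (void __iomem *)(vm->addr + (paddr - vm->phys_addr));
	}
	return NULL;	/* no match: fall back to creating a new mapping */
}

When no static mapping matches, ioremap simply falls back to its usual vmalloc-area path, so the reuse check is purely an optimization.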
#ifdef CONFIG_MMU

/* the upper-most page table pointer */
extern pmd_t *top_pmd;

#define TOP_PTE(x)	pte_offset_kernel(top_pmd, x)

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
}
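
/*
 * For illustration: pmd_off_k() walks pgd -> pud -> pmd for a kernel
 * virtual address; a caller would typically continue down to the pte, e.g.
 *
 *	pmd_t *pmd = pmd_off_k(addr);
 *	pte_t *pte = pte_offset_kernel(pmd, addr);
 */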

struct mem_type {
	pteval_t prot_pte;
	pmdval_t prot_l1;
	pmdval_t prot_sect;
	unsigned int domain;
};

const struct mem_type *get_mem_type(unsigned int type);
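
/*
 * For illustration: ioremap-style code looks the protection bits up by
 * memory type, e.g.
 *
 *	const struct mem_type *type = get_mem_type(MT_DEVICE);
 *	if (type)
 *		... use type->prot_pte / type->prot_sect to build the mapping ...
 */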

extern void __flush_dcache_page(struct address_space *mapping, struct page *page);

/*
 * ARM specific vm_struct->flags bits.
 */

/* (super)section-mapped I/O regions used by ioremap()/iounmap() */
#define VM_ARM_SECTION_MAPPING	0x80000000

/* permanent static mappings from iotable_init() */
#define VM_ARM_STATIC_MAPPING	0x40000000

/* mapping type (attributes) for permanent static mappings */
#define VM_ARM_MTYPE(mt)	((mt) << 20)
#define VM_ARM_MTYPE_MASK	(0x1f << 20)
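
/*
 * For illustration: per the commit message above, a static mapping set up
 * by iotable_init() is expected to carry both the "static" flag and its
 * memory type in vm_struct->flags, roughly
 *
 *	vm->flags = VM_ARM_STATIC_MAPPING | VM_ARM_MTYPE(MT_DEVICE);
 *
 * so that ioremap() can later test for a compatible type with
 *
 *	(vm->flags & VM_ARM_MTYPE_MASK) == VM_ARM_MTYPE(mtype)
 */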

#endif	/* CONFIG_MMU */

#ifdef CONFIG_ZONE_DMA
extern u32 arm_dma_limit;
#else
#define arm_dma_limit ((u32)~0)
#endif

void __init bootmem_init(void);
void arm_mm_memblock_reserve(void);