/*
 * This file contains the functions and defines necessary to modify and
 * use the SuperH page table tree.
 *
 * Copyright (C) 1999 Niibe Yutaka
 * Copyright (C) 2002 - 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#ifndef __ASM_SH_PGTABLE_H
#define __ASM_SH_PGTABLE_H

#ifdef CONFIG_X2TLB
#include <asm/pgtable-3level.h>
#else
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef __ASSEMBLY__
#include <asm/addrspace.h>
#include <asm/fixmap.h>

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
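
/*
 * Typically the generic mm code maps this page read-only to satisfy read
 * faults on anonymous memory that has never been written, so untouched
 * anonymous mappings read back as zeroes without consuming page frames.
 */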

#endif /* !__ASSEMBLY__ */

/*
 * Effective and physical address definitions, to aid with sign
 * extension.
 */
#define NEFF		32
#define NEFF_SIGN	(1LL << (NEFF - 1))
#define NEFF_MASK	(-1LL << NEFF)

static inline unsigned long long neff_sign_extend(unsigned long val)
{
	unsigned long long extended = val;
	return (extended & NEFF_SIGN) ? (extended | NEFF_MASK) : extended;
}
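
/*
 * Worked example: with NEFF == 32, NEFF_SIGN is 0x80000000 and NEFF_MASK
 * is 0xffffffff00000000, so neff_sign_extend(0xc0000000) yields
 * 0xffffffffc0000000 while neff_sign_extend(0x20000000) is returned
 * unchanged.
 */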

#ifdef CONFIG_29BIT
#define NPHYS		29
#else
#define NPHYS		32
#endif

#define NPHYS_SIGN	(1LL << (NPHYS - 1))
#define NPHYS_MASK	(-1LL << NPHYS)

#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Entries per level */
#define PTRS_PER_PTE	(PAGE_SIZE / (1 << PTE_MAGNITUDE))
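
/*
 * For example, with 4 KiB pages and 4-byte PTEs (PTE_MAGNITUDE == 2) this
 * works out to 1024 entries per PTE page, while 8-byte extended-mode PTEs
 * (PTE_MAGNITUDE == 3, as used with X2TLB) give 512.
 */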

#define FIRST_USER_ADDRESS	0

#define PHYS_ADDR_MASK29	0x1fffffff
#define PHYS_ADDR_MASK32	0xffffffff

static inline unsigned long phys_addr_mask(void)
{
	/* Is the MMU in 29bit mode? */
	if (__in_29bit_mode())
		return PHYS_ADDR_MASK29;

	return PHYS_ADDR_MASK32;
}

#define PTE_PHYS_MASK		(phys_addr_mask() & PAGE_MASK)
#define PTE_FLAGS_MASK		(~(PTE_PHYS_MASK) << PAGE_SHIFT)
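
/*
 * For example, with 4 KiB pages (PAGE_MASK == 0xfffff000) PTE_PHYS_MASK
 * evaluates to 0x1ffff000 in 29-bit physical mode and 0xfffff000 in
 * 32-bit mode, i.e. the PTE bits that hold the physical page frame address.
 */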

#ifdef CONFIG_SUPERH32
#define VMALLOC_START	(P3SEG)
#else
#define VMALLOC_START	(0xf0000000)
#endif
#define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)

#if defined(CONFIG_SUPERH32)
#include <asm/pgtable_32.h>
#else
#include <asm/pgtable_64.h>
#endif

/*
 * SH-X and lower (legacy) SuperH parts (SH-3, SH-4, some SH-4A) can't do page
 * protection for execute, and consider it the same as a read. Also, write
 * permission implies read permission. This is the closest we can get.
 *
 * SH-X2 (SH7785) and later parts take this to the opposite extreme, not only
 * supporting separate execute, read, and write bits, but having completely
 * separate permission bits for user and kernel space.
 */
	 /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_EXECREAD
#define __P101	PAGE_EXECREAD
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_WRITEONLY
#define __S011	PAGE_SHARED
#define __S100	PAGE_EXECREAD
#define __S101	PAGE_EXECREAD
#define __S110	PAGE_RWX
#define __S111	PAGE_RWX
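
/*
 * The xwr suffix is the execute/write/read combination requested for a
 * mapping; the generic mm code picks a __Pxxx entry for private
 * (copy-on-write) mappings and a __Sxxx entry for shared mappings when it
 * fills in protection_map[].
 */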

typedef pte_t *pte_addr_t;

#define kern_addr_valid(addr)	(1)

#define io_remap_pfn_range(vma, vaddr, pfn, size, prot)		\
		remap_pfn_range(vma, vaddr, pfn, size, prot)

#define pte_pfn(x)		((unsigned long)(((x).pte_low >> PAGE_SHIFT)))

/*
 * Initialise the page table caches
 */
extern void pgtable_cache_init(void);

struct vm_area_struct;
struct mm_struct;

extern void __update_cache(struct vm_area_struct *vma,
			   unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
			 unsigned long address, pte_t pte);
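
/*
 * Called by the generic mm code once a PTE has been installed so the
 * architecture can bring its caches and TLB up to date for the new
 * translation; here that just means handing the PTE value to
 * __update_cache() and __update_tlb().
 */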
static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
	pte_t pte = *ptep;
	__update_cache(vma, address, pte);
	__update_tlb(vma, address, pte);
}

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end,
				  pgd_t *pgd);

/* arch/sh/mm/mmap.c */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define __HAVE_ARCH_PTE_SPECIAL

#include <asm-generic/pgtable.h>

#endif /* __ASM_SH_PGTABLE_H */