parisc: use pgtable-nopXd instead of 4level-fixup
parisc has two or three levels of page tables and can use the appropriate pgtable-nopXd header and folding of the upper layers. Replace usage of include/asm-generic/4level-fixup.h and the explicit definitions of __PAGETABLE_PxD_FOLDED in parisc with include/asm-generic/pgtable-nopmd.h for two-level configurations and with include/asm-generic/pgtable-nopud.h for three-level configurations, and adjust the page table manipulation macros and functions accordingly.

Link: http://lkml.kernel.org/r/1572938135-31886-9-git-send-email-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Acked-by: Helge Deller <deller@gmx.de>
Cc: Anatoly Pugachev <matorola@gmail.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Peter Rosin <peda@axentia.se>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Sam Creasey <sammy@sammy.net>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7c2763c423
commit d96885e277
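Before the diff itself, a minimal user-space sketch of what the generic folding headers provide may help. It is loosely modelled on include/asm-generic/pgtable-nop4d.h and pgtable-nopud.h; the types and signatures here are invented for illustration, and the real headers define far more helpers.

#include <stdio.h>

/* Toy page-table entry types; the kernel wraps these in structs when
 * STRICT_MM_TYPECHECKS is enabled. */
typedef unsigned long pgd_t;
typedef pgd_t p4d_t;   /* folded: a p4d is just the pgd entry */
typedef p4d_t pud_t;   /* folded: a pud is just the p4d entry */

/* With a level folded, the "offset" helper does not index a lower table;
 * it simply returns the entry of the level above, cast to the folded type. */
static p4d_t *p4d_offset(pgd_t *pgd, unsigned long addr)
{
        (void)addr;
        return (p4d_t *)pgd;
}

static pud_t *pud_offset(p4d_t *p4d, unsigned long addr)
{
        (void)addr;
        return (pud_t *)p4d;
}

int main(void)
{
        pgd_t pgd_entry = 0x1234;
        p4d_t *p4d = p4d_offset(&pgd_entry, 0);
        pud_t *pud = pud_offset(p4d, 0);

        /* All three "levels" resolve to the same storage. */
        printf("pgd=%p p4d=%p pud=%p\n",
               (void *)&pgd_entry, (void *)p4d, (void *)pud);
        return 0;
}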
@@ -42,48 +42,54 @@ typedef struct { unsigned long pte; } pte_t; /* either 32 or 64bit */
 
 /* NOTE: even on 64 bits, these entries are __u32 because we allocate
  * the pmd and pgd in ZONE_DMA (i.e. under 4GB) */
-typedef struct { __u32 pmd; } pmd_t;
 typedef struct { __u32 pgd; } pgd_t;
 typedef struct { unsigned long pgprot; } pgprot_t;
 
-#define pte_val(x)	((x).pte)
-/* These do not work lvalues, so make sure we don't use them as such. */
+#if CONFIG_PGTABLE_LEVELS == 3
+typedef struct { __u32 pmd; } pmd_t;
+#define __pmd(x)	((pmd_t) { (x) } )
+/* pXd_val() do not work as lvalues, so make sure we don't use them as such. */
 #define pmd_val(x)	((x).pmd + 0)
+#endif
+
+#define pte_val(x)	((x).pte)
 #define pgd_val(x)	((x).pgd + 0)
 #define pgprot_val(x)	((x).pgprot)
 
 #define __pte(x)	((pte_t) { (x) } )
-#define __pmd(x)	((pmd_t) { (x) } )
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
-#define __pmd_val_set(x,n) (x).pmd = (n)
-#define __pgd_val_set(x,n) (x).pgd = (n)
-
 #else
 /*
  * .. while these make it easier on the compiler
 */
 typedef unsigned long pte_t;
+
+#if CONFIG_PGTABLE_LEVELS == 3
 typedef __u32 pmd_t;
+#define pmd_val(x)	(x)
+#define __pmd(x)	(x)
+#endif
+
 typedef __u32 pgd_t;
 typedef unsigned long pgprot_t;
 
 #define pte_val(x)	(x)
-#define pmd_val(x)	(x)
 #define pgd_val(x)	(x)
 #define pgprot_val(x)	(x)
 
 #define __pte(x)	(x)
-#define __pmd(x)	(x)
 #define __pgd(x)	(x)
 #define __pgprot(x)	(x)
 
-#define __pmd_val_set(x,n) (x) = (n)
-#define __pgd_val_set(x,n) (x) = (n)
-
 #endif /* STRICT_MM_TYPECHECKS */
 
+#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))
+#if CONFIG_PGTABLE_LEVELS == 3
+#define set_pud(pudptr, pudval) (*(pudptr) = (pudval))
+#endif
+
 typedef struct page *pgtable_t;
 
 typedef struct __physmem_range {
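The STRICT_MM_TYPECHECKS half of the hunk above leans on a common kernel idiom: each table-entry value is wrapped in a one-member struct so the compiler can tell a pmd_t from a pgd_t, while pXd_val()/__pXd() convert between the wrapper and the raw bits. A stand-alone sketch of the idiom (simplified, not the parisc definitions):

#include <stdio.h>

typedef struct { unsigned int pmd; } pmd_t;
typedef struct { unsigned int pgd; } pgd_t;

/* Adding "+ 0" turns the expression into an rvalue, so callers cannot
 * write pmd_val(x) = ...; they must go through __pmd()/set_pmd(). */
#define pmd_val(x)	((x).pmd + 0)
#define pgd_val(x)	((x).pgd + 0)
#define __pmd(x)	((pmd_t) { (x) })
#define __pgd(x)	((pgd_t) { (x) })

static void set_pmd(pmd_t *pmdp, pmd_t pmdval) { *pmdp = pmdval; }

int main(void)
{
        pmd_t pmd = __pmd(0x40u);
        pgd_t pgd = __pgd(0x80u);

        set_pmd(&pmd, __pmd(pmd_val(pmd) | 0x1u));
        printf("pmd=%#x pgd=%#x\n", pmd_val(pmd), pgd_val(pgd));

        /* set_pmd(&pmd, pgd);   -- would not compile: distinct types */
        /* pmd_val(pmd) = 5;     -- would not compile: not an lvalue  */
        return 0;
}

The "+ 0" trick is also why the patch introduces set_pmd()/set_pud(): pXd_val() deliberately cannot appear on the left-hand side of an assignment.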
@@ -34,13 +34,13 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 		/* Populate first pmd with allocated memory.  We mark it
 		 * with PxD_FLAG_ATTACHED as a signal to the system that this
 		 * pmd entry may not be cleared. */
-		__pgd_val_set(*actual_pgd, (PxD_FLAG_PRESENT |
-			PxD_FLAG_VALID |
-			PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
+		set_pgd(actual_pgd, __pgd((PxD_FLAG_PRESENT |
+			PxD_FLAG_VALID |
+			PxD_FLAG_ATTACHED)
+			+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT)));
 		/* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
 		 * a signal that this pmd may not be freed */
-		__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
+		set_pgd(pgd, __pgd(PxD_FLAG_ATTACHED));
 #endif
 	}
 	spin_lock_init(pgd_spinlock(actual_pgd));
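The mechanical part of the hunk above is replacing the lvalue-style __pgd_val_set(*pgd, val) with set_pgd(pgd, __pgd(val)): build a whole entry with the constructor macro, then store it. A hedged sketch of why that pairing works for both the struct-wrapped and plain-integer representations (the macro names mirror the kernel's, the definitions are simplified):

#include <stdio.h>

/* Toggle to mimic the kernel's STRICT_MM_TYPECHECKS switch. */
#define STRICT_TYPECHECKS 1

#if STRICT_TYPECHECKS
typedef struct { unsigned int pgd; } pgd_t;
#define pgd_val(x)	((x).pgd + 0)
#define __pgd(x)	((pgd_t) { (x) })
#else
typedef unsigned int pgd_t;
#define pgd_val(x)	(x)
#define __pgd(x)	(x)
#endif

/* One store helper covers both representations, which is what lets the
 * patch drop the per-representation __pgd_val_set() lvalue macros. */
#define set_pgd(pgdp, pgdval)	(*(pgdp) = (pgdval))

int main(void)
{
        pgd_t entry;

        set_pgd(&entry, __pgd(0x42u));
        printf("pgd entry = %#x\n", pgd_val(entry));
        return 0;
}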
@@ -59,10 +59,10 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 
 /* Three Level Page Table Support for pmd's */
 
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
 {
-	__pgd_val_set(*pgd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
-		(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
+	set_pud(pud, __pud((PxD_FLAG_PRESENT | PxD_FLAG_VALID) +
+		(__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT)));
 }
 
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
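pud_populate() packs the physical address of the pmd table plus a couple of flag bits into a single 32-bit entry, the same encoding that the pmd_address()/pud_address() macros later undo. A worked round-trip with invented constants (the real PxD_FLAG_* values and PxD_VALUE_SHIFT live in the parisc headers and differ from these):

#include <stdio.h>

/* Hypothetical layout constants for illustration only. */
#define PxD_FLAG_PRESENT	0x001u
#define PxD_FLAG_VALID		0x002u
#define PxD_FLAG_MASK		0x01fu
#define PxD_VALUE_SHIFT		5

/* Pack: flag bits in the low bits, physical address shifted down so the
 * whole entry still fits in 32 bits (the pmd/pgd entries are __u32). */
static unsigned int mk_entry(unsigned long phys)
{
        return (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
                + (unsigned int)(phys >> PxD_VALUE_SHIFT);
}

/* Unpack: mask the flags off and shift the address back up,
 * mirroring the pmd_address()/pud_address() macros. */
static unsigned long entry_address(unsigned int entry)
{
        return (unsigned long)(entry & ~PxD_FLAG_MASK) << PxD_VALUE_SHIFT;
}

int main(void)
{
        unsigned long phys = 0x12340000UL;  /* assumed suitably aligned */
        unsigned int entry = mk_entry(phys);

        printf("entry=%#x address back=%#lx\n", entry, entry_address(entry));
        return 0;
}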
@@ -88,19 +88,6 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	free_pages((unsigned long)pmd, PMD_ORDER);
 }
 
-#else
-
-/* Two Level Page Table Support for pmd's */
-
-/*
- * allocating and freeing a pmd is trivial: the 1-entry pmd is
- * inside the pgd, so has no extra memory associated with it.
- */
-
-#define pmd_alloc_one(mm, addr)		({ BUG(); ((pmd_t *)2); })
-#define pmd_free(mm, x)			do { } while (0)
-#define pgd_populate(mm, pmd, pte)	BUG()
-
 #endif
 
 static inline void
@@ -110,14 +97,14 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
 	/* preserve the gateway marker if this is the beginning of
 	 * the permanent pmd */
 	if(pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
-		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT |
-			PxD_FLAG_VALID |
-			PxD_FLAG_ATTACHED)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT |
+			PxD_FLAG_VALID |
+			PxD_FLAG_ATTACHED)
+			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
 	else
 #endif
-		__pmd_val_set(*pmd, (PxD_FLAG_PRESENT | PxD_FLAG_VALID)
-			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT));
+		set_pmd(pmd, __pmd((PxD_FLAG_PRESENT | PxD_FLAG_VALID)
+			+ (__u32)(__pa((unsigned long)pte) >> PxD_VALUE_SHIFT)));
 }
 
 #define pmd_populate(mm, pmd, pte_page) \
@@ -3,7 +3,12 @@
 #define _PARISC_PGTABLE_H
 
 #include <asm/page.h>
-#include <asm-generic/4level-fixup.h>
+
+#if CONFIG_PGTABLE_LEVELS == 3
+#include <asm-generic/pgtable-nopud.h>
+#elif CONFIG_PGTABLE_LEVELS == 2
+#include <asm-generic/pgtable-nopmd.h>
+#endif
 
 #include <asm/fixmap.h>
 
@@ -101,8 +106,10 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e))
+#if CONFIG_PGTABLE_LEVELS == 3
 #define pmd_ERROR(e) \
 	printk("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, (unsigned long)pmd_val(e))
+#endif
 #define pgd_ERROR(e) \
 	printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
 
@@ -132,19 +139,18 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
 #define PTRS_PER_PTE    (1UL << BITS_PER_PTE)
 
 /* Definitions for 2nd level */
+#if CONFIG_PGTABLE_LEVELS == 3
 #define PMD_SHIFT       (PLD_SHIFT + BITS_PER_PTE)
 #define PMD_SIZE	(1UL << PMD_SHIFT)
 #define PMD_MASK	(~(PMD_SIZE-1))
-#if CONFIG_PGTABLE_LEVELS == 3
 #define BITS_PER_PMD	(PAGE_SHIFT + PMD_ORDER - BITS_PER_PMD_ENTRY)
+#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
 #else
-#define __PAGETABLE_PMD_FOLDED 1
 #define BITS_PER_PMD	0
 #endif
-#define PTRS_PER_PMD    (1UL << BITS_PER_PMD)
 
 /* Definitions for 1st level */
-#define PGDIR_SHIFT	(PMD_SHIFT + BITS_PER_PMD)
+#define PGDIR_SHIFT	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)
 #if (PGDIR_SHIFT + PAGE_SHIFT + PGD_ORDER - BITS_PER_PGD_ENTRY) > BITS_PER_LONG
 #define BITS_PER_PGD	(BITS_PER_LONG - PGDIR_SHIFT)
 #else
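The PGDIR_SHIFT rewrite above is what keeps both configurations compiling: with the pmd folded, PMD_SHIFT is no longer defined by this header, but BITS_PER_PMD still is (as 0), so PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD yields the same value in either case. A worked example with made-up widths (real parisc values depend on PAGE_SIZE and on 32- vs 64-bit builds):

#include <stdio.h>

/* Assumed example widths, not the real parisc configuration. */
#define PLD_SHIFT	12	/* bits covered by one pte: the page offset   */
#define BITS_PER_PTE	9	/* index bits into the pte level              */
#define BITS_PER_PMD	9	/* index bits into the pmd level, 0 if folded */

/* The old formula needed PMD_SHIFT, which only exists when the pmd level
 * is real; the new formula produces the same number in both cases. */
#define PMD_SHIFT	(PLD_SHIFT + BITS_PER_PTE)
#define PGDIR_SHIFT_OLD	(PMD_SHIFT + BITS_PER_PMD)
#define PGDIR_SHIFT_NEW	(PLD_SHIFT + BITS_PER_PTE + BITS_PER_PMD)

int main(void)
{
        printf("PTRS_PER_PMD              = %lu\n", 1UL << BITS_PER_PMD);
        printf("PGDIR_SHIFT (old formula) = %d\n", PGDIR_SHIFT_OLD);
        printf("PGDIR_SHIFT (new formula) = %d\n", PGDIR_SHIFT_NEW);
        printf("one pgd entry maps %lu MiB\n",
               (1UL << PGDIR_SHIFT_NEW) >> 20);
        return 0;
}

Setting BITS_PER_PMD to 0 models the folded two-level case: PTRS_PER_PMD collapses to 1 and both formulas still agree.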
@@ -317,6 +323,8 @@ extern unsigned long *empty_zero_page;
 
 #define pmd_flag(x)	(pmd_val(x) & PxD_FLAG_MASK)
 #define pmd_address(x)	((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
+#define pud_flag(x)	(pud_val(x) & PxD_FLAG_MASK)
+#define pud_address(x)	((unsigned long)(pud_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 #define pgd_flag(x)	(pgd_val(x) & PxD_FLAG_MASK)
 #define pgd_address(x)	((unsigned long)(pgd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
 
@@ -334,42 +342,32 @@ static inline void pmd_clear(pmd_t *pmd) {
 	if (pmd_flag(*pmd) & PxD_FLAG_ATTACHED)
 		/* This is the entry pointing to the permanent pmd
 		 * attached to the pgd; cannot clear it */
-		__pmd_val_set(*pmd, PxD_FLAG_ATTACHED);
+		set_pmd(pmd, __pmd(PxD_FLAG_ATTACHED));
 	else
 #endif
-		__pmd_val_set(*pmd, 0);
+		set_pmd(pmd, __pmd(0));
 }
 
 
 
 #if CONFIG_PGTABLE_LEVELS == 3
-#define pgd_page_vaddr(pgd) ((unsigned long) __va(pgd_address(pgd)))
-#define pgd_page(pgd)	virt_to_page((void *)pgd_page_vaddr(pgd))
+#define pud_page_vaddr(pud) ((unsigned long) __va(pud_address(pud)))
+#define pud_page(pud)	virt_to_page((void *)pud_page_vaddr(pud))
 
 /* For 64 bit we have three level tables */
 
-#define pgd_none(x)     (!pgd_val(x))
-#define pgd_bad(x)      (!(pgd_flag(x) & PxD_FLAG_VALID))
-#define pgd_present(x)  (pgd_flag(x) & PxD_FLAG_PRESENT)
-static inline void pgd_clear(pgd_t *pgd) {
+#define pud_none(x)     (!pud_val(x))
+#define pud_bad(x)      (!(pud_flag(x) & PxD_FLAG_VALID))
+#define pud_present(x)  (pud_flag(x) & PxD_FLAG_PRESENT)
+static inline void pud_clear(pud_t *pud) {
 #if CONFIG_PGTABLE_LEVELS == 3
-	if(pgd_flag(*pgd) & PxD_FLAG_ATTACHED)
-		/* This is the permanent pmd attached to the pgd; cannot
+	if(pud_flag(*pud) & PxD_FLAG_ATTACHED)
+		/* This is the permanent pmd attached to the pud; cannot
 		 * free it */
 		return;
 #endif
-	__pgd_val_set(*pgd, 0);
+	set_pud(pud, __pud(0));
 }
-#else
-/*
- * The "pgd_xxx()" functions here are trivial for a folded two-level
- * setup: the pgd is never bad, and a pmd always exists (as it's folded
- * into the pgd entry)
- */
-static inline int pgd_none(pgd_t pgd)		{ return 0; }
-static inline int pgd_bad(pgd_t pgd)		{ return 0; }
-static inline int pgd_present(pgd_t pgd)	{ return 1; }
-static inline void pgd_clear(pgd_t * pgdp)	{ }
 #endif
 
 /*
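pmd_clear() keeps parisc's quirk of a "permanently attached" lower-level table: the entry carrying PxD_FLAG_ATTACHED is reset to the bare marker rather than wiped, and pud_clear() simply returns without touching it. A compact sketch of the pmd_clear() variant of that guard, with an invented flag value:

#include <stdio.h>

#define PxD_FLAG_ATTACHED	0x008u	/* invented value, for illustration */

/* Clear an entry unless it is the permanently attached one; in that case
 * fall back to the bare ATTACHED marker so later walkers still see that
 * the lower-level table must not be freed. */
static void clear_entry(unsigned int *entry)
{
        if (*entry & PxD_FLAG_ATTACHED)
                *entry = PxD_FLAG_ATTACHED;
        else
                *entry = 0;
}

int main(void)
{
        unsigned int ordinary = 0x123u;
        unsigned int attached = 0x123u | PxD_FLAG_ATTACHED;

        clear_entry(&ordinary);
        clear_entry(&attached);
        printf("ordinary=%#x attached=%#x\n", ordinary, attached);
        return 0;
}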
@@ -452,7 +450,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 #if CONFIG_PGTABLE_LEVELS == 3
 #define pmd_index(addr)         (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
 #define pmd_offset(dir,address) \
-((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
+((pmd_t *) pud_page_vaddr(*(dir)) + pmd_index(address))
 #else
 #define pmd_offset(dir,addr) ((pmd_t *) dir)
 #endif
@@ -4,7 +4,9 @@
 
 #include <asm-generic/tlb.h>
 
+#if CONFIG_PGTABLE_LEVELS == 3
 #define __pmd_free_tlb(tlb, pmd, addr)	pmd_free((tlb)->mm, pmd)
+#endif
 #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
 
 #endif
@@ -534,13 +534,16 @@ static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
 	pte_t *ptep = NULL;
 
 	if (!pgd_none(*pgd)) {
-		pud_t *pud = pud_offset(pgd, addr);
-		if (!pud_none(*pud)) {
-			pmd_t *pmd = pmd_offset(pud, addr);
-			if (!pmd_none(*pmd))
-				ptep = pte_offset_map(pmd, addr);
+		p4d_t *p4d = p4d_offset(pgd, addr);
+		if (!p4d_none(*p4d)) {
+			pud_t *pud = pud_offset(p4d, addr);
+			if (!pud_none(*pud)) {
+				pmd_t *pmd = pmd_offset(pud, addr);
+				if (!pmd_none(*pmd))
+					ptep = pte_offset_map(pmd, addr);
+			}
 		}
 	}
 	return ptep;
 }
 
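With 4level-fixup gone, walkers such as get_ptep() have to step through every level by name (pgd -> p4d -> pud -> pmd -> pte) even though p4d and pud are folded on parisc; the folded hops reduce to pointer casts that the compiler optimizes away. The toy model below (not kernel code; the table layout and helper definitions are invented) mirrors the shape of the new walk:

#include <stdio.h>
#include <stdlib.h>

/* Toy model: pointers instead of packed hardware entries, 4 slots per
 * table, p4d and pud folded into the pgd as on parisc. */
#define PTRS 4

typedef struct { void *next; } pgd_t;	/* points to a pmd table */
typedef pgd_t p4d_t;			/* folded */
typedef p4d_t pud_t;			/* folded */
typedef struct { void *next; } pmd_t;	/* points to a pte table */
typedef struct { unsigned long val; } pte_t;

static p4d_t *p4d_offset(pgd_t *pgd, unsigned long idx) { (void)idx; return (p4d_t *)pgd; }
static pud_t *pud_offset(p4d_t *p4d, unsigned long idx) { (void)idx; return (pud_t *)p4d; }
static pmd_t *pmd_offset(pud_t *pud, unsigned long idx) { return (pmd_t *)pud->next + idx; }
static pte_t *pte_offset(pmd_t *pmd, unsigned long idx) { return (pte_t *)pmd->next + idx; }

/* Mirrors the structure of get_ptep(): bail out at the first empty level
 * (plain NULL checks stand in for pXd_none()). */
static pte_t *get_ptep(pgd_t *pgd, unsigned long pmd_idx, unsigned long pte_idx)
{
        if (!pgd->next)
                return NULL;
        p4d_t *p4d = p4d_offset(pgd, 0);
        pud_t *pud = pud_offset(p4d, 0);
        pmd_t *pmd = pmd_offset(pud, pmd_idx);
        if (!pmd->next)
                return NULL;
        return pte_offset(pmd, pte_idx);
}

int main(void)
{
        pte_t *ptes = calloc(PTRS, sizeof(*ptes));
        pmd_t *pmds = calloc(PTRS, sizeof(*pmds));
        pgd_t pgd = { pmds };

        pmds[1].next = ptes;
        ptes[2].val = 0xabcUL;

        pte_t *ptep = get_ptep(&pgd, 1, 2);
        printf("pte = %#lx\n", ptep ? ptep->val : 0UL);

        pte_t *missing = get_ptep(&pgd, 0, 0);	/* pmd slot 0 is empty */
        printf("missing ? %s\n", missing ? "no" : "yes");

        free(ptes);
        free(pmds);
        return 0;
}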
@@ -133,9 +133,14 @@ static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
 
 	dir = pgd_offset_k(vaddr);
 	do {
+		p4d_t *p4d;
+		pud_t *pud;
 		pmd_t *pmd;
 
-		pmd = pmd_alloc(NULL, dir, vaddr);
+		p4d = p4d_offset(dir, vaddr);
+		pud = pud_offset(p4d, vaddr);
+		pmd = pmd_alloc(NULL, pud, vaddr);
+
 		if (!pmd)
 			return -ENOMEM;
 		if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
@@ -14,11 +14,13 @@ void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
 	pgd_t *pgd = pgd_offset_k(vaddr);
-	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	p4d_t *p4d = p4d_offset(pgd, vaddr);
+	pud_t *pud = pud_offset(p4d, vaddr);
+	pmd_t *pmd = pmd_offset(pud, vaddr);
 	pte_t *pte;
 
 	if (pmd_none(*pmd))
-		pmd = pmd_alloc(NULL, pgd, vaddr);
+		pmd = pmd_alloc(NULL, pud, vaddr);
 
 	pte = pte_offset_kernel(pmd, vaddr);
 	if (pte_none(*pte))
@@ -32,7 +34,9 @@ void notrace clear_fixmap(enum fixed_addresses idx)
 {
 	unsigned long vaddr = __fix_to_virt(idx);
 	pgd_t *pgd = pgd_offset_k(vaddr);
-	pmd_t *pmd = pmd_offset(pgd, vaddr);
+	p4d_t *p4d = p4d_offset(pgd, vaddr);
+	pud_t *pud = pud_offset(p4d, vaddr);
+	pmd_t *pmd = pmd_offset(pud, vaddr);
 	pte_t *pte = pte_offset_kernel(pmd, vaddr);
 
 	if (WARN_ON(pte_none(*pte)))