nds32 has only two-level page tables and can use pgtable-nopmd and
folding of the upper layers.  Replace usage of
include/asm-generic/4level-fixup.h and explicit definition of
__PAGETABLE_PMD_FOLDED in nds32 with include/asm-generic/pgtable-nopmd.h
and adjust page table manipulation macros and functions accordingly.

Link: http://lkml.kernel.org/r/1572938135-31886-8-git-send-email-rppt@kernel.org
Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
Cc: Anatoly Pugachev <matorola@gmail.com>
Cc: Anton Ivanov <anton.ivanov@cambridgegreys.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Greentime Hu <green.hu@gmail.com>
Cc: Greg Ungerer <gerg@linux-m68k.org>
Cc: Helge Deller <deller@gmx.de>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Cc: Mark Salter <msalter@redhat.com>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Peter Rosin <peda@axentia.se>
Cc: Richard Weinberger <richard@nod.at>
Cc: Rolf Eike Beer <eike-kernel@sf-tec.de>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Russell King <rmk+kernel@armlinux.org.uk>
Cc: Sam Creasey <sammy@sammy.net>
Cc: Vincent Chen <deanbo422@gmail.com>
Cc: Vineet Gupta <Vineet.Gupta1@synopsys.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
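Illustrative sketch (not part of the patch): with pgtable-nopmd the pmd, pud
and p4d levels are folded, so a generic four-level walk still compiles on
nds32 but only the pgd and pte levels touch real tables.  walk_to_pte() is a
hypothetical helper, assuming the usual kernel page-table accessors.

/* Hypothetical helper, for illustration only. */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);	/* folded: (p4d_t *)pgd */
	pud_t *pud = pud_offset(p4d, addr);	/* folded: (pud_t *)p4d */
	pmd_t *pmd = pmd_offset(pud, addr);	/* folded: (pmd_t *)pud */

	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, addr);
}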
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/init_task.h>
#include <asm/pgalloc.h>

/* Index of the first kernel-space entry in a pgd. */
#define FIRST_KERNEL_PGD_NR	(USER_PTRS_PER_PGD)

/*
 * need to get a page for the level-1 (pgd) table
 */
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	int i;

	new_pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 0);
	if (!new_pgd)
		return NULL;

	/* Pre-fill every entry; the kernel part is overwritten below. */
	for (i = 0; i < PTRS_PER_PGD; i++) {
		(*new_pgd) = 1;
		new_pgd++;
	}
	new_pgd -= PTRS_PER_PGD;

	/* Copy the kernel mappings from the init_mm page table. */
	init_pgd = pgd_offset_k(0);

	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/* Write the new table back out of the D-cache. */
	cpu_dcache_wb_range((unsigned long)new_pgd,
			    (unsigned long)new_pgd +
			    PTRS_PER_PGD * sizeof(pgd_t));

	/* Account the new page as a page-table page. */
	inc_zone_page_state(virt_to_page((unsigned long *)new_pgd),
			    NR_PAGETABLE);

	return new_pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	pmd = (pmd_t *) pgd;
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	/* Release the pte page still referenced by the first entry. */
	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
	pmd_free(mm, pmd);
free:
	free_pages((unsigned long)pgd, 0);
}
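
/*
 * Illustrative context only (not part of this file): the generic fork
 * code pairs the two hooks above roughly like kernel/fork.c does below,
 * so every mm_struct gets its own level-1 table at creation and drops
 * it again when the mm is finally destroyed.
 */
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
	mm->pgd = pgd_alloc(mm);
	if (unlikely(!mm->pgd))
		return -ENOMEM;
	return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
	pgd_free(mm, mm->pgd);
}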

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long pmdval;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		/* Install a 1:1 mapping for this user pgd entry. */
		pmdval = (i << PGDIR_SHIFT);
		p4d = p4d_offset(pgd, i << PGDIR_SHIFT);
		pud = pud_offset(p4d, i << PGDIR_SHIFT);
		pmd = pmd_offset(pud + i, i << PGDIR_SHIFT);
		set_pmd(pmd, __pmd(pmdval));
	}
}