f9cb654cb5
Most architectures define pgd_free() as a wrapper for free_page(). Provide a generic version in asm-generic/pgalloc.h and enable its use for most architectures. Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Reviewed-by: Pekka Enberg <penberg@kernel.org> Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> [m68k] Cc: Abdul Haleem <abdhalee@linux.vnet.ibm.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Christophe Leroy <christophe.leroy@csgroup.eu> Cc: Joerg Roedel <joro@8bytes.org> Cc: Joerg Roedel <jroedel@suse.de> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Satheesh Rajendran <sathnaga@linux.vnet.ibm.com> Cc: Stafford Horne <shorne@gmail.com> Cc: Stephen Rothwell <sfr@canb.auug.org.au> Cc: Steven Rostedt <rostedt@goodmis.org> Cc: Matthew Wilcox <willy@infradead.org> Link: http://lkml.kernel.org/r/20200627143453.31835-7-rppt@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
97 lines
2.0 KiB
C
97 lines
2.0 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
// Copyright (C) 2005-2017 Andes Technology Corporation
|
|
|
|
#include <linux/init_task.h>
|
|
|
|
#define __HAVE_ARCH_PGD_FREE
|
|
#include <asm/pgalloc.h>
|
|
|
|
#define FIRST_KERNEL_PGD_NR (USER_PTRS_PER_PGD)
|
|
|
|
/*
|
|
* need to get a page for level 1
|
|
*/
|
|
|
|
pgd_t *pgd_alloc(struct mm_struct *mm)
|
|
{
|
|
pgd_t *new_pgd, *init_pgd;
|
|
int i;
|
|
|
|
new_pgd = (pgd_t *) __get_free_pages(GFP_KERNEL, 0);
|
|
if (!new_pgd)
|
|
return NULL;
|
|
for (i = 0; i < PTRS_PER_PGD; i++) {
|
|
(*new_pgd) = 1;
|
|
new_pgd++;
|
|
}
|
|
new_pgd -= PTRS_PER_PGD;
|
|
|
|
init_pgd = pgd_offset_k(0);
|
|
|
|
memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
|
|
(PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
|
|
|
|
cpu_dcache_wb_range((unsigned long)new_pgd,
|
|
(unsigned long)new_pgd +
|
|
PTRS_PER_PGD * sizeof(pgd_t));
|
|
inc_zone_page_state(virt_to_page((unsigned long *)new_pgd),
|
|
NR_PAGETABLE);
|
|
|
|
return new_pgd;
|
|
}
|
|
|
|
/*
 * Free a top-level page table previously created by pgd_alloc().
 *
 * The pgd base pointer is reinterpreted as a pmd entry below, which
 * assumes the pmd level is folded into the pgd on this architecture —
 * TODO confirm against the nds32 pgtable headers.  Any pte page still
 * attached to that entry is released before the pgd page itself.
 */
void pgd_free(struct mm_struct *mm, pgd_t * pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/*
	 * NOTE(review): only the first entry (*pmd == pgd[0]) is examined
	 * here; presumably the folded layout makes that the only entry
	 * that can hold a pte page — verify.
	 */
	pmd = (pmd_t *) pgd;
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		/* Corrupt entry: report, wipe it, and just free the page. */
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	/* Detach and free the pte page, then drop the accounting. */
	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
	pte_free(mm, pte);
	mm_dec_nr_ptes(mm);
	/*
	 * NOTE(review): pmd points at the same page freed below; this is
	 * only safe if pmd_free() is a no-op for the folded pmd level —
	 * otherwise it would double-free.  Confirm.
	 */
	pmd_free(mm, pmd);
free:
	free_pages((unsigned long)pgd, 0);
}
|
|
|
|
/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages. This will then ensure that we have predictable
 * results when turning the mmu off
 *
 * @mode is accepted for API symmetry with other architectures and is
 * not used by this implementation.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long pmdval;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int i;

	/* Prefer the current task's tables; fall back to the kernel's. */
	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	/* Identity-map each user section: entry i maps VA i<<PGDIR_SHIFT. */
	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		pmdval = (i << PGDIR_SHIFT);
		p4d = p4d_offset(pgd, i << PGDIR_SHIFT);
		pud = pud_offset(p4d, i << PGDIR_SHIFT);
		/*
		 * NOTE(review): "pud + i" only selects entry i if the
		 * p4d/pud levels are folded so the offsets above return
		 * the table base rather than an indexed entry — presumably
		 * true on nds32, but verify against the folded helpers.
		 */
		pmd = pmd_offset(pud + i, i << PGDIR_SHIFT);
		set_pmd(pmd, __pmd(pmdval));
	}
}
|