forked from luck/tmp_suning_uos_patched
e05c7b1f2b
The powerpc 32-bit implementation of pgtable has nice shortcuts for accessing kernel PMD and PTE for a given virtual address. Make these helpers available for all architectures. [rppt@linux.ibm.com: microblaze: fix page table traversal in setup_rt_frame()] Link: http://lkml.kernel.org/r/20200518191511.GD1118872@kernel.org [akpm@linux-foundation.org: s/pmd_ptr_k/pmd_off_k/ in various powerpc places] Signed-off-by: Mike Rapoport <rppt@linux.ibm.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Cc: Arnd Bergmann <arnd@arndb.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Cain <bcain@codeaurora.org> Cc: Catalin Marinas <catalin.marinas@arm.com> Cc: Chris Zankel <chris@zankel.net> Cc: "David S. Miller" <davem@davemloft.net> Cc: Geert Uytterhoeven <geert@linux-m68k.org> Cc: Greentime Hu <green.hu@gmail.com> Cc: Greg Ungerer <gerg@linux-m68k.org> Cc: Guan Xuetao <gxt@pku.edu.cn> Cc: Guo Ren <guoren@kernel.org> Cc: Heiko Carstens <heiko.carstens@de.ibm.com> Cc: Helge Deller <deller@gmx.de> Cc: Ingo Molnar <mingo@redhat.com> Cc: Ley Foon Tan <ley.foon.tan@intel.com> Cc: Mark Salter <msalter@redhat.com> Cc: Matthew Wilcox <willy@infradead.org> Cc: Matt Turner <mattst88@gmail.com> Cc: Max Filippov <jcmvbkbc@gmail.com> Cc: Michael Ellerman <mpe@ellerman.id.au> Cc: Michal Simek <monstr@monstr.eu> Cc: Nick Hu <nickhu@andestech.com> Cc: Paul Walmsley <paul.walmsley@sifive.com> Cc: Richard Weinberger <richard@nod.at> Cc: Rich Felker <dalias@libc.org> Cc: Russell King <linux@armlinux.org.uk> Cc: Stafford Horne <shorne@gmail.com> Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: Vincent Chen <deanbo422@gmail.com> Cc: Vineet Gupta <vgupta@synopsys.com> Cc: Will Deacon <will@kernel.org> Cc: Yoshinori Sato <ysato@users.sourceforge.jp> Link: http://lkml.kernel.org/r/20200514170327.31389-9-rppt@kernel.org Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
105 lines
2.8 KiB
C
/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

|
#include <linux/memblock.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

/*
 * Set up an early, read-mostly KASAN shadow: every PMD entry covering the
 * shadow region is pointed at the single shared kasan_early_shadow_pte,
 * whose PTEs all map the one zeroed kasan_early_shadow_page.  This gives
 * KASAN instrumentation something valid to read before the real shadow is
 * allocated in kasan_init().
 */
void __init kasan_early_init(void)
{
	unsigned long vaddr = KASAN_SHADOW_START;
	/* Kernel PMD entry covering the start of the shadow region. */
	pmd_t *pmd = pmd_off_k(vaddr);
	int i;

	/* Point every slot of the shared early PTE page at the zero page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
				PAGE_KERNEL));

	/*
	 * Walk the whole shadow range one PMD at a time; vaddr is reused
	 * here purely as a byte counter (pmd already points at the first
	 * relevant entry).  Each PMD must still be empty at this point.
	 */
	for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) {
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd((unsigned long)kasan_early_shadow_pte));
	}
	early_trap_init();
}
|
|
|
|
/*
 * Back the shadow range [start, end) with real, writable memory.
 *
 * Allocates one contiguous array of PTEs for the whole range plus one
 * physical page per PTE, wires them into the kernel page tables, then
 * zero-fills the freshly mapped shadow.  start/end are shadow-space
 * virtual addresses and are assumed to be PMD-aligned (each PMD gets a
 * full PTRS_PER_PTE worth of PTEs).
 */
static void __init populate(void *start, void *end)
{
	unsigned long n_pages = (end - start) / PAGE_SIZE;
	unsigned long n_pmds = n_pages / PTRS_PER_PTE;
	unsigned long i, j;
	unsigned long vaddr = (unsigned long)start;
	/* First kernel PMD entry covering the range. */
	pmd_t *pmd = pmd_off_k(vaddr);
	/* One flat array of PTEs for all PMDs; carved up below. */
	pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);

	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	pr_debug("%s: %p - %p\n", __func__, start, end);

	/* Allocate a backing page for every PTE and fill in the entries. */
	for (i = j = 0; i < n_pmds; ++i) {
		int k;

		for (k = 0; k < PTRS_PER_PTE; ++k, ++j) {
			phys_addr_t phys =
				memblock_phys_alloc_range(PAGE_SIZE, PAGE_SIZE,
							  0,
							  MEMBLOCK_ALLOC_ANYWHERE);

			if (!phys)
				panic("Failed to allocate page table page\n");

			set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
		}
	}

	/* Hook each PTRS_PER_PTE-sized slice of the PTE array into its PMD. */
	for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE)
		set_pmd(pmd + i, __pmd((unsigned long)pte));

	/* Drop stale TLB entries (early mappings) before touching the range. */
	local_flush_tlb_all();
	/* Clean shadow: all addresses start out as accessible. */
	memset(start, 0, end - start);
}
|
|
|
|
/*
 * Finish KASAN initialization: replace the shared early shadow with real
 * writable shadow memory for the VMALLOC..KSEG range, write-protect and
 * re-zero the early shadow page, and enable KASAN error reporting.
 */
void __init kasan_init(void)
{
	int i;

	/*
	 * Compile-time sanity checks on the shadow layout: the offset must
	 * match the start of the shadow region, and the shadowed region must
	 * begin at or before VMALLOC_START.
	 */
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START -
		     (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT));
	BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR);

	/*
	 * Replace shadow map pages that cover addresses from VMALLOC area
	 * start to the end of KSEG with clean writable pages.
	 */
	populate(kasan_mem_to_shadow((void *)VMALLOC_START),
		 kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR));

	/*
	 * Write protect kasan_early_shadow_page and zero-initialize it again.
	 * (It may have been dirtied by stores through the early read-write
	 * mapping; remap read-only, flush, then clear it one last time.)
	 */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	local_flush_tlb_all();
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);

	/* At this point kasan is fully initialized. Enable error messages. */
	current->kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}
|