s390/kasan: free early identity mapping structures

The kasan initialization code is changed to populate the persistent
shadow first, save the allocator position in pgalloc_freeable, and only
then create the early identity mapping. This way the paging structures
of the early identity mapping can be freed in one go after the switch
to swapper_pg_dir, once the early identity mapping is no longer needed.
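
To make the idea concrete, below is a small stand-alone sketch of the
watermark pattern the patch relies on. It is not kernel code: the pool,
the simplified early_alloc_page() helper and the page counts are made up
for illustration; only the names pgalloc_pos and pgalloc_freeable and the
top-down allocation direction mirror the s390 early allocator (the
direction is also what the memblock_free() call in the patch implies).

#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned char pool[16 * PAGE_SIZE];	/* stands in for early memory */
static unsigned long pgalloc_pos;		/* bump allocator, grows downward */
static unsigned long pgalloc_freeable;		/* watermark after shadow populate */

/* hand out one page for a paging structure, top-down like the s390 code */
static void *early_alloc_page(void)
{
	pgalloc_pos -= PAGE_SIZE;
	return (void *)pgalloc_pos;
}

int main(void)
{
	pgalloc_pos = (unsigned long)pool + sizeof(pool);

	/* 1. populate the persistent kasan shadow: these pages are kept */
	early_alloc_page();
	early_alloc_page();

	/* 2. remember the allocator position */
	pgalloc_freeable = pgalloc_pos;

	/* 3. populate the early identity mapping: these pages are temporary */
	early_alloc_page();
	early_alloc_page();
	early_alloc_page();

	/*
	 * 4. once swapper_pg_dir is active the early identity mapping is
	 *    unused; everything allocated past the watermark can be given
	 *    back in a single call, which in the kernel is
	 *    memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos).
	 */
	printf("free %lu bytes at %#lx\n",
	       pgalloc_freeable - pgalloc_pos, pgalloc_pos);
	return 0;
}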

Reviewed-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Authored by Vasily Gorbik on 2017-11-20 12:56:10 +01:00, committed by Martin Schwidefsky
parent 5e78596329
commit 135ff16393
3 changed files with 13 additions and 2 deletions

arch/s390/include/asm/kasan.h

@@ -15,9 +15,11 @@
 extern void kasan_early_init(void);
 extern void kasan_copy_shadow(pgd_t *dst);
+extern void kasan_free_early_identity(void);
 #else
 static inline void kasan_early_init(void) { }
 static inline void kasan_copy_shadow(pgd_t *dst) { }
+static inline void kasan_free_early_identity(void) { }
 #endif
 #endif

arch/s390/mm/init.c

@@ -109,6 +109,7 @@ void __init paging_init(void)
 	psw_bits(psw).dat = 1;
 	psw_bits(psw).as = PSW_BITS_AS_HOME;
 	__load_psw_mask(psw.mask);
+	kasan_free_early_identity();
 	sparse_memory_present_with_active_regions(MAX_NUMNODES);
 	sparse_init();

arch/s390/mm/kasan_init.c

@@ -15,6 +15,7 @@ static unsigned long segment_pos __initdata;
 static unsigned long segment_low __initdata;
 static unsigned long pgalloc_pos __initdata;
 static unsigned long pgalloc_low __initdata;
+static unsigned long pgalloc_freeable __initdata;
 static bool has_edat __initdata;
 static bool has_nx __initdata;
@@ -298,14 +299,16 @@ void __init kasan_early_init(void)
 	 * | 2Gb		| \| unmapped	| allocated per module
 	 * +-----------------+ +- shadow end ---+
 	 */
-	/* populate identity mapping */
-	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
 	/* populate kasan shadow (for identity mapping and zero page mapping) */
 	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
 	if (IS_ENABLED(CONFIG_MODULES))
 		untracked_mem_end = vmax - MODULES_LEN;
 	kasan_early_vmemmap_populate(__sha(memsize), __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
+	/* memory allocated for identity mapping structs will be freed later */
+	pgalloc_freeable = pgalloc_pos;
+	/* populate identity mapping */
+	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
 	kasan_set_pgd(early_pg_dir, asce_type);
 	kasan_enable_dat();
 	/* enable kasan */
@@ -345,3 +348,8 @@ void __init kasan_copy_shadow(pgd_t *pg_dir)
 	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
 }
+
+void __init kasan_free_early_identity(void)
+{
+	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
+}