commit d8ed45c5dc

This change converts the existing mmap_sem rwsem calls to use the new
mmap locking API instead.

The change is generated using coccinelle with the following rule:

// spatch --sp-file mmap_lock_api.cocci --in-place --include-headers --dir .

@@
expression mm;
@@
(
-init_rwsem
+mmap_init_lock
|
-down_write
+mmap_write_lock
|
-down_write_killable
+mmap_write_lock_killable
|
-down_write_trylock
+mmap_write_trylock
|
-up_write
+mmap_write_unlock
|
-downgrade_write
+mmap_write_downgrade
|
-down_read
+mmap_read_lock
|
-down_read_killable
+mmap_read_lock_killable
|
-down_read_trylock
+mmap_read_trylock
|
-up_read
+mmap_read_unlock
)
-(&mm->mmap_sem)
+(mm)

Signed-off-by: Michel Lespinasse <walken@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Daniel Jordan <daniel.m.jordan@oracle.com>
Reviewed-by: Laurent Dufour <ldufour@linux.ibm.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dbueso@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Liam Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ying Han <yinghan@google.com>
Link: http://lkml.kernel.org/r/20200520052908.204642-5-walken@google.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
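As an illustration, here is a sketch of how the rule above rewrites a
write-lock call site such as the one in arch_setup_additional_pages()
further down in this file; the "before" form is reconstructed from the
rule and is not part of this page:

	/* Before: raw rwsem calls on mm->mmap_sem (reconstructed). */
	down_write(&mm->mmap_sem);
	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
				      VM_READ | VM_EXEC | VM_MAYREAD |
				      VM_MAYEXEC, kuser_page);
	up_write(&mm->mmap_sem);

	/* After: the equivalent mmap locking API wrappers, as in this file. */
	mmap_write_lock(mm);
	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
				      VM_READ | VM_EXEC | VM_MAYREAD |
				      VM_MAYEXEC, kuser_page);
	mmap_write_unlock(mm);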
128 lines | 3.2 KiB | C
/*
 * Copyright (C) 2013 Altera Corporation
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 *   Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd
 *
 * based on arch/m68k/mm/init.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/binfmts.h>

#include <asm/setup.h>
#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/cpuinfo.h>
#include <asm/processor.h>

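/* Active page global directory; paging_init() points it at swapper_pg_dir. */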
pgd_t *pgd_current;

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S. It sets up the zone sizes and
 * hands the memory reserved by the boot-time allocator over to the
 * buddy allocator via free_area_init().
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	pagetable_init();
	pgd_current = swapper_pg_dir;

	max_zone_pfn[ZONE_NORMAL] = max_mapnr;

	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(max_zone_pfn);

	flush_dcache_range((unsigned long)empty_zero_page,
			(unsigned long)empty_zero_page + PAGE_SIZE);
}

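/*
 * Release all memory left in memblock to the buddy allocator and print
 * the kernel's memory-usage banner.
 */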
void __init mem_init(void)
{
	unsigned long end_mem = memory_end; /* this must not include
						kernel stack at top */

	pr_debug("mem_init: start=%lx, end=%lx\n", memory_start, memory_end);

	end_mem &= PAGE_MASK;
	high_memory = __va(end_mem);

	/* this will put all memory onto the freelists */
	memblock_free_all();
	mem_init_print_info(NULL);
}

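/* Finish MMU bring-up by invalidating all stale TLB entries. */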
void __init mmu_init(void)
{
	flush_tlb_all();
}

#define __page_aligned(order) __aligned(PAGE_SIZE << (order))
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned(PGD_ORDER);
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned(PTE_ORDER);
static struct page *kuser_page[1];

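/*
 * Copy the kuser helper code from the kernel image into a fresh zeroed
 * page and make it executable; the page is later mapped into each user
 * process by arch_setup_additional_pages().
 */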
static int alloc_kuser_page(void)
{
	extern char __kuser_helper_start[], __kuser_helper_end[];
	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
	unsigned long vpage;

	vpage = get_zeroed_page(GFP_ATOMIC);
	if (!vpage)
		return -ENOMEM;

	/* Copy kuser helpers */
	memcpy((void *)vpage, __kuser_helper_start, kuser_sz);

	flush_icache_range(vpage, vpage + KUSER_SIZE);
	kuser_page[0] = virt_to_page(vpage);

	return 0;
}
arch_initcall(alloc_kuser_page);

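/*
 * Called during exec to map the kuser helper page at the fixed address
 * KUSER_BASE in the new process's address space, under the mmap write lock.
 */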
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_write_lock(mm);

	/* Map kuser helpers to user space address */
	ret = install_special_mapping(mm, KUSER_BASE, KUSER_SIZE,
				      VM_READ | VM_EXEC | VM_MAYREAD |
				      VM_MAYEXEC, kuser_page);

	mmap_write_unlock(mm);

	return ret;
}

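/* Label the kuser helper mapping as "[kuser]" in /proc/<pid>/maps. */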
const char *arch_vma_name(struct vm_area_struct *vma)
{
	return (vma->vm_start == KUSER_BASE) ? "[kuser]" : NULL;
}