mm/memory.c: remove ZAP_BLOCK_SIZE
ZAP_BLOCK_SIZE became unused in the preemptible-mmu_gather work
("mm: Remove i_mmap_lock lockbreak").  So zap it.

Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 32f84528fb
commit 6ac4752006
mm/memory.c | 11 -----------
1 file changed, 11 deletions(-)
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1290,13 +1290,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
 	return addr;
 }
 
-#ifdef CONFIG_PREEMPT
-# define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)
-#else
-/* No preempt: go for improved straight-line efficiency */
-# define ZAP_BLOCK_SIZE	(1024 * PAGE_SIZE)
-#endif
-
 /**
  * unmap_vmas - unmap a range of memory covered by a list of vma's
  * @tlb: address of the caller's struct mmu_gather
@@ -1310,10 +1303,6 @@ static unsigned long unmap_page_range(struct mmu_gather *tlb,
  *
  * Unmap all pages in the vma list.
  *
- * We aim to not hold locks for too long (for scheduling latency reasons).
- * So zap pages in ZAP_BLOCK_SIZE bytecounts. This means we need to
- * return the ending mmu_gather to the caller.
- *
  * Only addresses between `start' and `end' will be unmapped.
  *
  * The VMA list must be sorted in ascending virtual address order.
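
For context on what was removed: before the mmu_gather became preemptible,
unmap_vmas() bounded scheduling latency by zapping in ZAP_BLOCK_SIZE byte
counts and offering a reschedule point between blocks. Below is a minimal,
self-contained userspace sketch of that chunking pattern, not the kernel
code itself; unmap_block() and reschedule_pending() are hypothetical
stand-ins for unmap_page_range() and need_resched().

#include <stdio.h>

#define PAGE_SIZE	4096UL
/* The removed tunable: small blocks when preemptible, large otherwise. */
#define ZAP_BLOCK_SIZE	(8 * PAGE_SIZE)

/* Stand-in for unmap_page_range(): pretend to tear down [start, end). */
static unsigned long unmap_block(unsigned long start, unsigned long end)
{
	printf("zap %#lx - %#lx\n", start, end);
	return end;
}

/* Stand-in for need_resched(); a real kernel checks the current task. */
static int reschedule_pending(void)
{
	return 0;
}

/*
 * Zap a range in ZAP_BLOCK_SIZE byte counts so that at most one block
 * of work happens between preemption points.
 */
static void zap_in_blocks(unsigned long start, unsigned long end)
{
	while (start < end) {
		unsigned long block_end = start + ZAP_BLOCK_SIZE;

		if (block_end > end)
			block_end = end;
		start = unmap_block(start, block_end);
		if (reschedule_pending()) {
			/* kernel: tlb_finish_mmu(), cond_resched(), regather */
		}
	}
}

int main(void)
{
	/* Hypothetical 20-page range, zapped in 8-page blocks. */
	zap_in_blocks(0x100000UL, 0x100000UL + 20 * PAGE_SIZE);
	return 0;
}

Once the mmu_gather can itself be preempted mid-walk, this fixed block
size (and the CONFIG_PREEMPT/non-preempt split it encoded) no longer buys
anything, which is why the define could simply be deleted.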