mm: use vma_pages() to replace (vm_end - vm_start) >> PAGE_SHIFT
The (*->vm_end - *->vm_start) >> PAGE_SHIFT operation is already implemented as an inline function, vma_pages(), in linux/mm.h, so use it.

Signed-off-by: Libin <huawei.libin@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit d6e9321770
parent b430e9d1c6
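For reference, the helper being substituted is defined in include/linux/mm.h roughly as follows (a sketch of the upstream definition, not part of this commit's diff):

/* include/linux/mm.h: number of pages spanned by a VMA */
static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}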
@@ -2904,7 +2904,7 @@ static inline void unmap_mapping_range_tree(struct rb_root *root,
 			details->first_index, details->last_index) {
 
 		vba = vma->vm_pgoff;
-		vea = vba + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) - 1;
+		vea = vba + vma_pages(vma) - 1;
 		/* Assume for now that PAGE_CACHE_SHIFT == PAGE_SHIFT */
 		zba = details->first_index;
 		if (zba < vba)
@@ -955,7 +955,7 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 	if (is_mergeable_vma(vma, file, vm_flags) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
 		pgoff_t vm_pglen;
-		vm_pglen = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+		vm_pglen = vma_pages(vma);
 		if (vma->vm_pgoff + vm_pglen == vm_pgoff)
 			return 1;
 	}
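To illustrate how the helper reads in a typical caller, here is a minimal, hypothetical driver mmap handler; mydrv_mmap, MYDRV_MAX_PAGES, and mydrv_pfn_base are made up for the example, while vma_pages() and remap_pfn_range() are real kernel APIs:

#include <linux/fs.h>
#include <linux/mm.h>

#define MYDRV_MAX_PAGES 16		/* hypothetical device limit */

static unsigned long mydrv_pfn_base;	/* hypothetical PFN of device memory */

static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* same value as (vma->vm_end - vma->vm_start) >> PAGE_SHIFT */
	unsigned long npages = vma_pages(vma);

	if (npages > MYDRV_MAX_PAGES)
		return -EINVAL;

	return remap_pfn_range(vma, vma->vm_start, mydrv_pfn_base,
			       npages << PAGE_SHIFT, vma->vm_page_prot);
}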