mm: add a pmd_fault handler
Allow non-anonymous VMAs to provide huge pages in response to a page fault.

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
Cc: Hillf Danton <dhillf@gmail.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Theodore Ts'o <tytso@mit.edu>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit b96375f74a
parent 4897c7655d
include/linux/mm.h
@@ -249,6 +249,8 @@ struct vm_operations_struct {
 	void (*close)(struct vm_area_struct * area);
 	int (*mremap)(struct vm_area_struct * area);
 	int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
+	int (*pmd_fault)(struct vm_area_struct *, unsigned long address,
+						pmd_t *, unsigned int flags);
 	void (*map_pages)(struct vm_area_struct *vma, struct vm_fault *vmf);
 
 	/* notification that a previously read-only page is about to become
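For orientation, the sketch below shows how a provider of non-anonymous huge pages might hook into the new callback. It is illustrative only and not part of this commit; every sample_* symbol is invented. The contract visible in the diff is: install a PMD-sized mapping if possible, otherwise return VM_FAULT_FALLBACK so the core mm falls back to the regular ->fault path.

/* Hypothetical example, not from this commit: all sample_* names are invented. */
static int sample_pmd_fault(struct vm_area_struct *vma, unsigned long address,
			    pmd_t *pmd, unsigned int flags)
{
	/* Nothing huge to map here: let the core mm retry with 4k pages. */
	if (!sample_can_map_huge(vma, address))
		return VM_FAULT_FALLBACK;

	/* Install a PMD-sized mapping; honour write faults via the flags. */
	return sample_insert_pmd(vma, address, pmd, flags & FAULT_FLAG_WRITE);
}

static const struct vm_operations_struct sample_vm_ops = {
	.fault		= sample_fault,
	.pmd_fault	= sample_pmd_fault,
};

Note from the mm/memory.c changes below that write faults on an existing read-only huge PMD are also routed to ->pmd_fault (via wp_huge_pmd), with FAULT_FLAG_WRITE set in flags, so a handler can distinguish read and write faults from the same entry point.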
mm/memory.c
@@ -3232,6 +3232,27 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	return 0;
 }
 
+static int create_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_anonymous_page(mm, vma, address, pmd, flags);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
+static int wp_huge_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, pmd_t *pmd, pmd_t orig_pmd,
+			unsigned int flags)
+{
+	if (!vma->vm_ops)
+		return do_huge_pmd_wp_page(mm, vma, address, pmd, orig_pmd);
+	if (vma->vm_ops->pmd_fault)
+		return vma->vm_ops->pmd_fault(vma, address, pmd, flags);
+	return VM_FAULT_FALLBACK;
+}
+
 /*
  * These routines also need to handle stuff like marking pages dirty
  * and/or accessed for architectures that don't do it in hardware (most
@@ -3334,10 +3355,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pmd)
 		return VM_FAULT_OOM;
 	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
-		int ret = VM_FAULT_FALLBACK;
-		if (!vma->vm_ops)
-			ret = do_huge_pmd_anonymous_page(mm, vma, address,
-					pmd, flags);
+		int ret = create_huge_pmd(mm, vma, address, pmd, flags);
 		if (!(ret & VM_FAULT_FALLBACK))
 			return ret;
 	} else {
@@ -3361,8 +3379,8 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 					     orig_pmd, pmd);
 
 		if (dirty && !pmd_write(orig_pmd)) {
-			ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
-						  orig_pmd);
+			ret = wp_huge_pmd(mm, vma, address, pmd,
+					  orig_pmd, flags);
 			if (!(ret & VM_FAULT_FALLBACK))
 				return ret;
 		} else {