mm/hmm: add missing call to hmm_pte_need_fault in HMM_PFN_SPECIAL handling
Currently, if a special PTE is encountered, hmm_range_fault() immediately
returns EFAULT and sets the HMM_PFN_SPECIAL error output (which nothing
uses).
EFAULT should only be returned after testing with hmm_pte_need_fault().
Also, pte_devmap() and pte_special() are mutually exclusive, and there is
no need to check IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL): pte_special() is
stubbed out to return false on architectures that do not support special
PTEs.
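For reference (not part of this patch), the stub looks roughly like this
in include/linux/mm.h when CONFIG_ARCH_HAS_PTE_SPECIAL is not set:

#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
/* No architecture support for special PTEs: never report one. */
static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return pte;
}
#endif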
Fixes: 992de9a8b7 ("mm/hmm: allow to mirror vma of a file on a DAX backed filesystem")
Reviewed-by: Ralph Campbell <rcampbell@nvidia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
1 changed file with 12 additions and 7 deletions

 mm/hmm.c | 19 ++++++++++++-------
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -339,16 +339,21 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
 			pte_unmap(ptep);
 			return -EBUSY;
 		}
-	} else if (IS_ENABLED(CONFIG_ARCH_HAS_PTE_SPECIAL) && pte_special(pte)) {
-		if (!is_zero_pfn(pte_pfn(pte))) {
+	}
+
+	/*
+	 * Since each architecture defines a struct page for the zero page, just
+	 * fall through and treat it like a normal page.
+	 */
+	if (pte_special(pte) && !is_zero_pfn(pte_pfn(pte))) {
+		hmm_pte_need_fault(hmm_vma_walk, orig_pfn, 0, &fault,
+				   &write_fault);
+		if (fault || write_fault) {
 			pte_unmap(ptep);
-			*pfn = range->values[HMM_PFN_SPECIAL];
 			return -EFAULT;
 		}
-		/*
-		 * Since each architecture defines a struct page for the zero
-		 * page, just fall through and treat it like a normal page.
-		 */
+		*pfn = range->values[HMM_PFN_SPECIAL];
+		return 0;
 	}
 
 	*pfn = hmm_device_entry_from_pfn(range, pte_pfn(pte)) | cpu_flags;