KVM: Replace is_hwpoison_address with __get_user_pages

is_hwpoison_address only checks whether the page table entry is
hwpoisoned, regardless of the memory page mapped, while
__get_user_pages will check both.

QEMU will clear the poisoned page table entry (via unmap/map) to make
it possible to allocate a new memory page for the virtual address
across guest reboots.  But it is also possible that the underlying
memory page is kept poisoned even after the corresponding page table
entry is cleared, that is, a new memory page cannot be allocated.
__get_user_pages can catch these situations.

Signed-off-by: Huang Ying <ying.huang@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
This commit is contained in:
Huang Ying 2011-01-30 11:15:49 +08:00 committed by Marcelo Tosatti
parent 69ebb83e13
commit fafc3dbaac

View File

@@ -1028,6 +1028,15 @@ static pfn_t get_fault_pfn(void)
	return fault_pfn;
}
/*
 * check_user_page_hwpoison - report whether the user page backing @addr
 * is hardware-poisoned.
 *
 * Faults in the single page at @addr via __get_user_pages() with
 * FOLL_HWPOISON set, so both a poisoned page table entry and a poisoned
 * backing page are detected.  Returns 1 if the lookup failed with
 * -EHWPOISON, 0 otherwise.
 *
 * NOTE(review): the caller visible below takes current->mm->mmap_sem
 * for read around this call; any new caller must do the same.
 */
static inline int check_user_page_hwpoison(unsigned long addr)
{
	/* FOLL_HWPOISON makes gup report poison as -EHWPOISON. */
	int gup_flags = FOLL_TOUCH | FOLL_HWPOISON | FOLL_WRITE;
	int ret;

	ret = __get_user_pages(current, current->mm, addr, 1,
			       gup_flags, NULL, NULL, NULL);
	return ret == -EHWPOISON;
}
static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
			bool *async, bool write_fault, bool *writable)
{
@@ -1075,7 +1084,7 @@ static pfn_t hva_to_pfn(struct kvm *kvm, unsigned long addr, bool atomic,
		return get_fault_pfn();

	down_read(&current->mm->mmap_sem);
-	if (is_hwpoison_address(addr)) {
+	if (check_user_page_hwpoison(addr)) {
		up_read(&current->mm->mmap_sem);
		get_page(hwpoison_page);
		return page_to_pfn(hwpoison_page);