vfio/type1: Add conditional rescheduling

IOMMU operations can be expensive and it's not very difficult for a
user to give us a lot of work to do for a map or unmap operation.
Killing a large VM with vfio assigned devices can result in soft
lockups and IOMMU tracing shows that we can easily spend 80% of our
time with need-resched set.  A sprinkling of cond_resched() calls
after map and unmap calls has a very tiny effect on performance
while resulting in traces with <1% of calls overflowing into
need-resched.

Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
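
For readers unfamiliar with the idiom, the change simply drops a voluntary
preemption point into each loop that can issue a large amount of IOMMU work.
A minimal, hypothetical sketch of the pattern follows (the function name
unmap_many and its parameters are illustrative only, not the actual vfio code):

	/* Assumed example: a long-running unmap loop with a preemption point. */
	#include <linux/iommu.h>	/* iommu_unmap() */
	#include <linux/sched.h>	/* cond_resched() */

	static void unmap_many(struct iommu_domain *domain,
			       unsigned long iova, size_t size, size_t chunk)
	{
		unsigned long end = iova + size;

		while (iova < end) {
			/* Each call may spend a long time in the IOMMU driver. */
			iommu_unmap(domain, iova, chunk);
			iova += chunk;

			/* Yield the CPU if need-resched is set, avoiding soft lockups. */
			cond_resched();
		}
	}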

@@ -351,8 +351,10 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	domain = d = list_first_entry(&iommu->domain_list,
 				      struct vfio_domain, next);
 
-	list_for_each_entry_continue(d, &iommu->domain_list, next)
+	list_for_each_entry_continue(d, &iommu->domain_list, next) {
 		iommu_unmap(d->domain, dma->iova, dma->size);
+		cond_resched();
+	}
 
 	while (iova < end) {
 		size_t unmapped, len;
@@ -384,6 +386,8 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 					     unmapped >> PAGE_SHIFT,
 					     dma->prot, false);
 		iova += unmapped;
+
+		cond_resched();
 	}
 
 	vfio_lock_acct(-unlocked);
@@ -528,6 +532,8 @@ static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
 			    map_try_harder(d, iova, pfn, npage, prot))
 				goto unwind;
 		}
+
+		cond_resched();
 	}
 
 	return 0;