iommu/vt-d: Cache PCI ATS state and Invalidate Queue Depth

We check the ATS state (enabled/disabled) and fetch the PCI ATS Invalidate
Queue Depth in performance-sensitive paths, e.g., the device-IOTLB flush
path.  Both are easy to cache, which removes the calls into the PCI core
from those paths.

Remember the ATS enabled state.  When enabling, read the queue depth once
and cache it in the device_domain_info struct.  This is similar to what
amd_iommu.c does.
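
For illustration, a minimal sketch of the caching pattern follows.  It is
ordinary user-space C, compilable on its own; dev_info_stub and the
pci_*_stub() helpers are hypothetical stand-ins for device_domain_info
and the kernel's PCI ATS API, not part of this patch.

  #include <stdint.h>
  #include <stdio.h>

  /* Hypothetical stand-ins for pci_enable_ats() and
   * pci_ats_queue_depth(); the real helpers live in the PCI core. */
  static int pci_enable_ats_stub(void)      { return 0; }  /* 0 on success */
  static int pci_ats_queue_depth_stub(void) { return 32; }

  struct dev_info_stub {              /* stand-in for device_domain_info */
  	uint16_t sid;               /* source-id: bus << 8 | devfn */
  	struct {
  		uint8_t enabled:1;  /* cached "ATS is on" flag */
  		uint8_t qdep;       /* cached invalidate queue depth */
  	} ats;
  };

  /* Slow path, run once per device: enable ATS, then cache the enabled
   * state and the queue depth. */
  static void enable_dev_iotlb(struct dev_info_stub *info)
  {
  	if (pci_enable_ats_stub())
  		return;
  	info->ats.enabled = 1;
  	info->ats.qdep = (uint8_t)pci_ats_queue_depth_stub();
  }

  /* Hot path, run on every invalidation: consult only the cached
   * fields, with no call back into the PCI core. */
  static void flush_dev_iotlb(const struct dev_info_stub *info)
  {
  	if (!info->ats.enabled)
  		return;
  	printf("flush: sid=%#x qdep=%u\n",
  	       (unsigned)info->sid, (unsigned)info->ats.qdep);
  }

  int main(void)
  {
  	struct dev_info_stub info = { .sid = (1 << 8) | 0x10 };
  	enable_dev_iotlb(&info);
  	flush_dev_iotlb(&info);
  	return 0;
  }

The point of the pattern is that the queue depth is read once, on the
slow enable path, so the flush path only touches fields already in the
per-device struct while it holds device_domain_lock.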

Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
Reviewed-by: Joerg Roedel <jroedel@suse.de>
Acked-by: Joerg Roedel <jroedel@suse.de>
commit fb0cc3aa55
parent bc0195aad0
Author: Bjorn Helgaas <bhelgaas@google.com>
Date:   2015-07-20 09:10:36 -05:00

drivers/iommu/intel-iommu.c

@@ -408,6 +408,10 @@ struct device_domain_info {
 	struct list_head global; /* link to global list */
 	u8 bus; /* PCI bus number */
 	u8 devfn; /* PCI devfn number */
+	struct {
+		u8 enabled:1;
+		u8 qdep;
+	} ats; /* ATS state */
 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
 	struct intel_iommu *iommu; /* IOMMU used by this device */
 	struct dmar_domain *domain; /* pointer to domain */
@@ -1391,19 +1395,26 @@ iommu_support_dev_iotlb (struct dmar_domain *domain, struct intel_iommu *iommu,
 
 static void iommu_enable_dev_iotlb(struct device_domain_info *info)
 {
+	struct pci_dev *pdev;
+
 	if (!info || !dev_is_pci(info->dev))
 		return;
 
-	pci_enable_ats(to_pci_dev(info->dev), VTD_PAGE_SHIFT);
+	pdev = to_pci_dev(info->dev);
+	if (pci_enable_ats(pdev, VTD_PAGE_SHIFT))
+		return;
+
+	info->ats.enabled = 1;
+	info->ats.qdep = pci_ats_queue_depth(pdev);
 }
 
 static void iommu_disable_dev_iotlb(struct device_domain_info *info)
 {
-	if (!info->dev || !dev_is_pci(info->dev) ||
-	    !pci_ats_enabled(to_pci_dev(info->dev)))
+	if (!info->ats.enabled)
 		return;
 
 	pci_disable_ats(to_pci_dev(info->dev));
+	info->ats.enabled = 0;
 }
 
 static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
@@ -1415,16 +1426,11 @@ static void iommu_flush_dev_iotlb(struct dmar_domain *domain,
 
 	spin_lock_irqsave(&device_domain_lock, flags);
 	list_for_each_entry(info, &domain->devices, link) {
-		struct pci_dev *pdev;
-
-		if (!info->dev || !dev_is_pci(info->dev))
-			continue;
-
-		pdev = to_pci_dev(info->dev);
-		if (!pci_ats_enabled(pdev))
+		if (!info->ats.enabled)
 			continue;
 
 		sid = info->bus << 8 | info->devfn;
-		qdep = pci_ats_queue_depth(pdev);
+		qdep = info->ats.qdep;
 		qi_flush_dev_iotlb(info->iommu, sid, qdep, addr, mask);
 	}
 	spin_unlock_irqrestore(&device_domain_lock, flags);
@@ -2272,6 +2278,8 @@ static struct dmar_domain *dmar_insert_dev_info(struct intel_iommu *iommu,
 
 	info->bus = bus;
 	info->devfn = devfn;
+	info->ats.enabled = 0;
+	info->ats.qdep = 0;
 	info->dev = dev;
 	info->domain = domain;
 	info->iommu = iommu;