x86/amd-iommu: Add passthrough mode initialization functions

When iommu=pt is passed on the kernel command line, devices
should run untranslated. This requires allocating a special
domain for that purpose. This patch implements the allocation
and initialization path for iommu=pt.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Author: Joerg Roedel <joerg.roedel@amd.com>
Date:   2009-08-26 15:26:30 +02:00
commit 0feae533dd
parent 2650815fb0

3 changed files with 69 additions and 8 deletions
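For context: iommu=pt itself is parsed by the generic x86 IOMMU setup code, not by this patch. A minimal sketch of that handling, assuming the iommu_pass_through flag from arch/x86/kernel/pci-dma.c; the parser below is a simplified stand-in, not the kernel's exact code:

	/* sketch only: simplified stand-in for the iommu= option parser */
	int iommu_pass_through;			/* nonzero when iommu=pt was given */

	static int __init iommu_setup_sketch(char *p)
	{
		while (*p) {
			if (!strncmp(p, "pt", 2))
				iommu_pass_through = 1;	/* request passthrough mode */
			p += strcspn(p, ",");		/* skip to the next option */
			if (*p == ',')
				p++;
		}
		return 0;
	}
	early_param("iommu", iommu_setup_sketch);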

diff --git a/arch/x86/include/asm/amd_iommu.h b/arch/x86/include/asm/amd_iommu.h

@@ -25,6 +25,7 @@
 #ifdef CONFIG_AMD_IOMMU
 extern int amd_iommu_init(void);
 extern int amd_iommu_init_dma_ops(void);
+extern int amd_iommu_init_passthrough(void);
 extern void amd_iommu_detect(void);
 extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
 extern void amd_iommu_flush_all_domains(void);
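The new declaration only matters once the init path calls it. A rough sketch of the intended caller, assuming the iommu_pass_through flag above; the actual wiring into amd_iommu_init() happens in a follow-up patch:

	/* sketch: dispatch in amd_iommu_init(), depending on iommu=pt */
	if (iommu_pass_through)
		ret = amd_iommu_init_passthrough();
	else
		ret = amd_iommu_init_dma_ops();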

diff --git a/arch/x86/include/asm/amd_iommu_types.h b/arch/x86/include/asm/amd_iommu_types.h

@@ -143,6 +143,7 @@
 #define EVT_BUFFER_SIZE 8192 /* 512 entries */
 #define EVT_LEN_MASK (0x9ULL << 56)

+#define PAGE_MODE_NONE    0x00
 #define PAGE_MODE_1_LEVEL 0x01
 #define PAGE_MODE_2_LEVEL 0x02
 #define PAGE_MODE_3_LEVEL 0x03

@@ -194,6 +195,9 @@
 #define PD_DMA_OPS_MASK     (1UL << 0) /* domain used for dma_ops */
 #define PD_DEFAULT_MASK     (1UL << 1) /* domain is a default dma_ops
                                           domain for an IOMMU */
+#define PD_PASSTHROUGH_MASK (1UL << 2) /* domain has no page
+                                          translation */

 extern bool amd_iommu_dump;
 #define DUMP_printk(format, arg...) \
	do { \
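PAGE_MODE_NONE does the real work here: __attach_device() copies domain->mode into the mode field of the device table entry, and a mode of 0 tells the hardware to pass addresses through untranslated. Roughly, following the existing __attach_device() logic (macro names as defined in amd_iommu_types.h):

	u64 pte_root = virt_to_phys(domain->pt_root);

	/* mode 0 (PAGE_MODE_NONE) disables page translation for this device */
	pte_root |= (domain->mode & DEV_ENTRY_MODE_MASK) << DEV_ENTRY_MODE_SHIFT;
	pte_root |= IOMMU_PTE_IR | IOMMU_PTE_IW | IOMMU_PTE_P | IOMMU_PTE_TV;

PD_PASSTHROUGH_MASK, by contrast, is only defined here; nothing in this patch sets it yet.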

diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c

@@ -41,6 +41,12 @@ static DEFINE_RWLOCK(amd_iommu_devtable_lock);
 static LIST_HEAD(iommu_pd_list);
 static DEFINE_SPINLOCK(iommu_pd_list_lock);

+/*
+ * Domain for untranslated devices - only allocated
+ * if iommu=pt passed on kernel cmd line.
+ */
+static struct protection_domain *pt_domain;
+
 #ifdef CONFIG_IOMMU_API
 static struct iommu_ops amd_iommu_ops;
 #endif
@@ -1067,9 +1073,9 @@ static struct protection_domain *domain_for_device(u16 devid)
  * If a device is not yet associated with a domain, this function does
  * assigns it visible for the hardware
  */
-static void attach_device(struct amd_iommu *iommu,
-			  struct protection_domain *domain,
-			  u16 devid)
+static void __attach_device(struct amd_iommu *iommu,
+			    struct protection_domain *domain,
+			    u16 devid)
 {
	unsigned long flags;
	u64 pte_root = virt_to_phys(domain->pt_root);

@@ -1087,12 +1093,19 @@ static void attach_device(struct amd_iommu *iommu,
	amd_iommu_pd_table[devid] = domain;
	write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
+}

-	/*
-	 * We might boot into a crash-kernel here. The crashed kernel
-	 * left the caches in the IOMMU dirty. So we have to flush
-	 * here to evict all dirty stuff.
-	 */
+static void attach_device(struct amd_iommu *iommu,
+			  struct protection_domain *domain,
+			  u16 devid)
+{
+	__attach_device(iommu, domain, devid);
+
+	/*
+	 * We might boot into a crash-kernel here. The crashed kernel
+	 * left the caches in the IOMMU dirty. So we have to flush
+	 * here to evict all dirty stuff.
+	 */
	iommu_queue_inv_dev_entry(iommu, devid);
	iommu_flush_tlb_pde(iommu, domain->id);
 }
@@ -2219,3 +2232,46 @@ static struct iommu_ops amd_iommu_ops = {
	.domain_has_cap = amd_iommu_domain_has_cap,
 };

+/*****************************************************************************
+ *
+ * The next functions do a basic initialization of the IOMMU for pass through
+ * mode
+ *
+ * In passthrough mode the IOMMU is initialized and enabled but not used for
+ * DMA-API translation.
+ *
+ *****************************************************************************/
+
+int __init amd_iommu_init_passthrough(void)
+{
+	struct pci_dev *dev = NULL;
+	u16 devid, devid2;
+
+	/* allocate passthrough domain */
+	pt_domain = protection_domain_alloc();
+	if (!pt_domain)
+		return -ENOMEM;
+
+	pt_domain->mode |= PAGE_MODE_NONE;
+
+	while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
+		struct amd_iommu *iommu;
+
+		devid = calc_devid(dev->bus->number, dev->devfn);
+		if (devid > amd_iommu_last_bdf)
+			continue;
+
+		devid2 = amd_iommu_alias_table[devid];
+
+		iommu = amd_iommu_rlookup_table[devid2];
+		if (!iommu)
+			continue;
+
+		__attach_device(iommu, pt_domain, devid);
+		__attach_device(iommu, pt_domain, devid2);
+	}
+
+	pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
+
+	return 0;
+}
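For reference, calc_devid() packs the PCI bus and devfn into the 16-bit BDF value that indexes the AMD IOMMU lookup tables; it is already defined in amd_iommu_types.h:

	static inline u16 calc_devid(u8 bus, u8 devfn)
	{
		return (((u16)bus) << 8) | devfn;
	}

The loop attaches both devid and its alias devid2 because requests from some devices reach the IOMMU under the alias ID; both device table entries have to point at pt_domain for the device to run fully untranslated.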