iommu/amd: Make use of the generic IOVA allocator
Remove the old address allocation code and make use of the generic IOVA allocator that is also used by other dma-ops implementations.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 518d9b4503
commit 256e4621c2
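For context, below is a minimal sketch of the alloc/free pattern this patch switches to, written against the <linux/iova.h> API as it existed around this commit (alloc_iova_fast() here takes no flush_rcache argument; that was added later). The my_*() wrappers are hypothetical names for illustration, and IOVA_PFN() is spelled out because amd_iommu.c defines that macro locally.

    #include <linux/iova.h>         /* alloc_iova_fast(), free_iova_fast() */
    #include <linux/log2.h>         /* __roundup_pow_of_two() */
    #include <linux/dma-mapping.h>  /* DMA_BIT_MASK() */

    /* amd_iommu.c defines this helper locally. */
    #define IOVA_PFN(addr)  ((addr) >> PAGE_SHIFT)

    /* Hypothetical wrapper mirroring dma_ops_alloc_iova() below. */
    static unsigned long my_alloc_iova(struct iova_domain *iovad,
                                       unsigned int pages, u64 dma_mask)
    {
            unsigned long pfn = 0;

            /* The per-cpu IOVA caches work on power-of-two sizes. */
            pages = __roundup_pow_of_two(pages);

            /*
             * First try the 32-bit range, which the allocator tracks
             * separately, then fall back to the device's full DMA mask.
             */
            if (dma_mask > DMA_BIT_MASK(32))
                    pfn = alloc_iova_fast(iovad, pages,
                                          IOVA_PFN(DMA_BIT_MASK(32)));
            if (!pfn)
                    pfn = alloc_iova_fast(iovad, pages, IOVA_PFN(dma_mask));

            return pfn << PAGE_SHIFT;       /* 0 means allocation failed */
    }

    /* Hypothetical counterpart mirroring dma_ops_free_iova() below. */
    static void my_free_iova(struct iova_domain *iovad,
                             unsigned long address, unsigned int pages)
    {
            /* Must round up the same way the allocation did. */
            pages = __roundup_pow_of_two(pages);
            free_iova_fast(iovad, address >> PAGE_SHIFT, pages);
    }

Note the design shift visible in the hunks: the old per-aperture bitmap code flushed the domain TLB inside its free path when needed, whereas with the generic IOVA allocator the callers (__map_single()'s error path and __unmap_single()) now flush the domain TLB explicitly before releasing the range.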
@@ -1649,167 +1649,32 @@ static int alloc_new_range(struct dma_ops_domain *dma_dom,
 	return -ENOMEM;
 }
 
-static dma_addr_t dma_ops_aperture_alloc(struct dma_ops_domain *dom,
-					 struct aperture_range *range,
-					 unsigned long pages,
-					 unsigned long dma_mask,
-					 unsigned long boundary_size,
-					 unsigned long align_mask,
-					 bool trylock)
+static unsigned long dma_ops_alloc_iova(struct device *dev,
+					struct dma_ops_domain *dma_dom,
+					unsigned int pages, u64 dma_mask)
 {
-	unsigned long offset, limit, flags;
-	dma_addr_t address;
-	bool flush = false;
-
-	offset = range->offset >> PAGE_SHIFT;
-	limit = iommu_device_max_index(APERTURE_RANGE_PAGES, offset,
-				       dma_mask >> PAGE_SHIFT);
-
-	if (trylock) {
-		if (!spin_trylock_irqsave(&range->bitmap_lock, flags))
-			return -1;
-	} else {
-		spin_lock_irqsave(&range->bitmap_lock, flags);
-	}
-
-	address = iommu_area_alloc(range->bitmap, limit, range->next_bit,
-				   pages, offset, boundary_size, align_mask);
-	if (address == -1) {
-		/* Nothing found, retry one time */
-		address = iommu_area_alloc(range->bitmap, limit,
-					   0, pages, offset, boundary_size,
-					   align_mask);
-		flush = true;
-	}
-
-	if (address != -1)
-		range->next_bit = address + pages;
-
-	spin_unlock_irqrestore(&range->bitmap_lock, flags);
-
-	if (flush) {
-		domain_flush_tlb(&dom->domain);
-		domain_flush_complete(&dom->domain);
-	}
-
-	return address;
-}
-
-static unsigned long dma_ops_area_alloc(struct device *dev,
-					struct dma_ops_domain *dom,
-					unsigned int pages,
-					unsigned long align_mask,
-					u64 dma_mask)
-{
-	unsigned long boundary_size, mask;
-	unsigned long address = -1;
-	bool first = true;
-	u32 start, i;
-
-	preempt_disable();
-
-	mask = dma_get_seg_boundary(dev);
-
-again:
-	start = this_cpu_read(*dom->next_index);
-
-	/* Sanity check - is it really necessary? */
-	if (unlikely(start > APERTURE_MAX_RANGES)) {
-		start = 0;
-		this_cpu_write(*dom->next_index, 0);
-	}
-
-	boundary_size = mask + 1 ? ALIGN(mask + 1, PAGE_SIZE) >> PAGE_SHIFT :
-				   1UL << (BITS_PER_LONG - PAGE_SHIFT);
-
-	for (i = 0; i < APERTURE_MAX_RANGES; ++i) {
-		struct aperture_range *range;
-		int index;
-
-		index = (start + i) % APERTURE_MAX_RANGES;
-
-		range = dom->aperture[index];
-
-		if (!range || range->offset >= dma_mask)
-			continue;
-
-		address = dma_ops_aperture_alloc(dom, range, pages,
-						 dma_mask, boundary_size,
-						 align_mask, first);
-		if (address != -1) {
-			address = range->offset + (address << PAGE_SHIFT);
-			this_cpu_write(*dom->next_index, index);
-			break;
-		}
-	}
-
-	if (address == -1 && first) {
-		first = false;
-		goto again;
-	}
+	unsigned long pfn = 0;
 
-	preempt_enable();
+	pages = __roundup_pow_of_two(pages);
 
-	return address;
-}
+	if (dma_mask > DMA_BIT_MASK(32))
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				      IOVA_PFN(DMA_BIT_MASK(32)));
 
-static unsigned long dma_ops_alloc_addresses(struct device *dev,
-					     struct dma_ops_domain *dom,
-					     unsigned int pages,
-					     unsigned long align_mask,
-					     u64 dma_mask)
-{
-	unsigned long address = -1;
-
-	while (address == -1) {
-		address = dma_ops_area_alloc(dev, dom, pages,
-					     align_mask, dma_mask);
-
-		if (address == -1 && alloc_new_range(dom, false, GFP_ATOMIC))
-			break;
-	}
-
-	if (unlikely(address == -1))
-		address = DMA_ERROR_CODE;
-
-	WARN_ON((address + (PAGE_SIZE*pages)) > dom->aperture_size);
+	if (!pfn)
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
 
-	return address;
+	return (pfn << PAGE_SHIFT);
 }
 
-/*
- * The address free function.
- *
- * called with domain->lock held
- */
-static void dma_ops_free_addresses(struct dma_ops_domain *dom,
-				   unsigned long address,
-				   unsigned int pages)
+static void dma_ops_free_iova(struct dma_ops_domain *dma_dom,
+			      unsigned long address,
+			      unsigned int pages)
 {
-	unsigned i = address >> APERTURE_RANGE_SHIFT;
-	struct aperture_range *range = dom->aperture[i];
-	unsigned long flags;
-
-	BUG_ON(i >= APERTURE_MAX_RANGES || range == NULL);
-
-#ifdef CONFIG_IOMMU_STRESS
-	if (i < 4)
-		return;
-#endif
-
-	if (amd_iommu_unmap_flush) {
-		domain_flush_tlb(&dom->domain);
-		domain_flush_complete(&dom->domain);
-	}
-
-	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
-
-	spin_lock_irqsave(&range->bitmap_lock, flags);
-	if (address + pages > range->next_bit)
-		range->next_bit = address + pages;
-	bitmap_clear(range->bitmap, address, pages);
-	spin_unlock_irqrestore(&range->bitmap_lock, flags);
+	pages = __roundup_pow_of_two(pages);
+	address >>= PAGE_SHIFT;
+
+	free_iova_fast(&dma_dom->iovad, address, pages);
 }
 
 /****************************************************************************
@@ -2586,9 +2451,7 @@ static dma_addr_t __map_single(struct device *dev,
 	if (align)
 		align_mask = (1UL << get_order(size)) - 1;
 
-	address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
-					  dma_mask);
+	address = dma_ops_alloc_iova(dev, dma_dom, pages, dma_mask);
 
 	if (address == DMA_ERROR_CODE)
 		goto out;
@@ -2626,7 +2489,10 @@ static dma_addr_t __map_single(struct device *dev,
 		iommu_unmap_page(&dma_dom->domain, start, PAGE_SIZE);
 	}
 
-	dma_ops_free_addresses(dma_dom, address, pages);
+	domain_flush_tlb(&dma_dom->domain);
+	domain_flush_complete(&dma_dom->domain);
+
+	dma_ops_free_iova(dma_dom, address, pages);
 
 	return DMA_ERROR_CODE;
 }
@@ -2658,7 +2524,10 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}
 
-	dma_ops_free_addresses(dma_dom, dma_addr, pages);
+	domain_flush_tlb(&dma_dom->domain);
+	domain_flush_complete(&dma_dom->domain);
+
+	dma_ops_free_iova(dma_dom, dma_addr, pages);
 }
 
 /*