powerpc: Implement dma_mmap_coherent()

This is used by ALSA to mmap buffers allocated with dma_alloc_coherent()
into userspace. We need a special variant to handle machines with
non-coherent DMA, as those buffers have "special" virtual addresses and
require non-cacheable mappings.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
commit 6090912c4a
parent 15d260b36f
Benjamin Herrenschmidt, 2011-03-24 20:50:06 +00:00
3 changed files with 44 additions and 0 deletions
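For context, this is roughly how a driver consumes the new hook: a minimal sketch of a file_operations mmap handler built on dma_alloc_coherent()/dma_mmap_coherent(). The my_dev structure and my_mmap name are hypothetical, not part of this commit.

#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical per-device state; none of this is in the commit. */
struct my_dev {
	struct device *dev;	/* the DMA-capable device */
	void *cpu_addr;		/* returned by dma_alloc_coherent() */
	dma_addr_t handle;	/* matching DMA/bus address */
	size_t size;		/* buffer size, page aligned */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct my_dev *md = file->private_data;

	/* Refuse mappings larger than the allocated buffer. */
	if (vma->vm_end - vma->vm_start > md->size)
		return -EINVAL;

	/*
	 * dma_mmap_coherent() picks the right pgprot and PFN even on
	 * non-coherent machines, where cpu_addr is a special uncached
	 * virtual address that virt_to_page() cannot handle.
	 */
	return dma_mmap_coherent(md->dev, vma, md->cpu_addr,
				 md->handle, md->size);
}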

arch/powerpc/include/asm/dma-mapping.h

@@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr);
 extern void __dma_sync(void *vaddr, size_t size, int direction);
 extern void __dma_sync_page(struct page *page, unsigned long offset,
 			    size_t size, int direction);
+extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr);
 
 #else /* ! CONFIG_NOT_COHERENT_CACHE */
 /*
@@ -198,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
+
+extern int dma_mmap_coherent(struct device *, struct vm_area_struct *,
+			     void *, dma_addr_t, size_t);
+#define ARCH_HAS_DMA_MMAP_COHERENT
+
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 				  enum dma_data_direction direction)
 {
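The ARCH_HAS_DMA_MMAP_COHERENT define is what generic code (ALSA here) can test to know the architecture supplies its own dma_mmap_coherent(). A hedged sketch of that pattern follows; the helper name and the fallback branch are illustrative, not taken from ALSA or this commit.

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Hypothetical consumer: prefer the arch-provided hook when the
 * architecture advertises it. */
static int map_coherent_buf(struct device *dev, struct vm_area_struct *vma,
			    void *cpu_addr, dma_addr_t handle, size_t size)
{
#ifdef ARCH_HAS_DMA_MMAP_COHERENT
	return dma_mmap_coherent(dev, vma, cpu_addr, handle, size);
#else
	/* Fallback for fully coherent architectures, where the buffer
	 * is ordinary kernel memory and virt_to_page() is valid. */
	return remap_pfn_range(vma, vma->vm_start,
			       page_to_pfn(virt_to_page(cpu_addr)) +
			       vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
#endif
}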

arch/powerpc/kernel/dma.c

@@ -179,3 +179,21 @@ static int __init dma_init(void)
 	return 0;
 }
 fs_initcall(dma_init);
+
+int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
+		      void *cpu_addr, dma_addr_t handle, size_t size)
+{
+	unsigned long pfn;
+
+#ifdef CONFIG_NOT_COHERENT_CACHE
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
+#else
+	pfn = page_to_pfn(virt_to_page(cpu_addr));
+#endif
+	return remap_pfn_range(vma, vma->vm_start,
+			       pfn + vma->vm_pgoff,
+			       vma->vm_end - vma->vm_start,
+			       vma->vm_page_prot);
+}
+EXPORT_SYMBOL_GPL(dma_mmap_coherent);
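Seen from userspace, the result is an ordinary MAP_SHARED mapping of the device file. A minimal sketch of the other side of this call; /dev/mydev and the 64 KiB size are placeholders.

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	size_t size = 65536;			/* must fit the driver's buffer */
	int fd = open("/dev/mydev", O_RDWR);	/* placeholder device node */
	if (fd < 0)
		return 1;

	/*
	 * This ends up in the driver's mmap handler and thus in
	 * dma_mmap_coherent(); on non-coherent powerpc the pages come
	 * back non-cacheable, so plain loads and stores are DMA-safe.
	 */
	unsigned char *buf = mmap(NULL, size, PROT_READ | PROT_WRITE,
				  MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED)
		return 1;

	buf[0] = 0xaa;				/* touch the shared buffer */
	munmap(buf, size);
	close(fd);
	return 0;
}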

arch/powerpc/mm/dma-noncoherent.c

@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset,
 #endif
 }
 EXPORT_SYMBOL(__dma_sync_page);
+
+/*
+ * Return the PFN for a given cpu virtual address returned by
+ * __dma_alloc_coherent. This is used by dma_mmap_coherent()
+ */
+unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr)
+{
+	/* This should always be populated, so we don't test every
+	 * level. If that fails, we'll have a nice crash which
+	 * will be as good as a BUG_ON()
+	 */
+	pgd_t *pgd = pgd_offset_k(cpu_addr);
+	pud_t *pud = pud_offset(pgd, cpu_addr);
+	pmd_t *pmd = pmd_offset(pud, cpu_addr);
+	pte_t *ptep = pte_offset_kernel(pmd, cpu_addr);
+
+	if (pte_none(*ptep) || !pte_present(*ptep))
+		return 0;
+	return pte_pfn(*ptep);
+}
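One subtlety worth noting: on a failed lookup the helper returns 0, which dma_mmap_coherent() above would pass straight to remap_pfn_range(). A hedged sketch of defensive handling a caller could add; the wrapper name is hypothetical and this hardening is not in the commit.

#include <linux/errno.h>
#include <linux/mm.h>

/* Illustrative hardening: fail the mmap cleanly instead of
 * remapping from PFN 0 when the lookup finds no PTE. */
static inline int coherent_pfn_checked(void *cpu_addr, unsigned long *pfn)
{
	*pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr);
	if (!*pfn)
		return -ENXIO;	/* not a __dma_alloc_coherent() address */
	return 0;
}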