1ee18de929
Merge tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - enhance the dma pool to allow atomic allocation on x86 with AMD SEV
   (David Rientjes)

 - two small cleanups (Jason Yan and Peter Collingbourne)

* tag 'dma-mapping-5.8' of git://git.infradead.org/users/hch/dma-mapping:
  dma-contiguous: fix comment for dma_release_from_contiguous
  dma-pool: scale the default DMA coherent pool size with memory capacity
  x86/mm: unencrypted non-blocking DMA allocations use coherent pools
  dma-pool: add pool sizes to debugfs
  dma-direct: atomic allocations must come from atomic coherent pools
  dma-pool: dynamically expanding atomic pools
  dma-pool: add additional coherent pools to map to gfp mask
  dma-remap: separate DMA atomic pools from direct remap code
  dma-debug: make __dma_entry_alloc_check_leak() static
71 lines
1.6 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2014 The Linux Foundation
 */
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

struct page **dma_common_find_pages(void *cpu_addr)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT)
		return NULL;
	return area->pages;
}

/*
 * Remaps an array of PAGE_SIZE pages into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_pages_remap(struct page **pages, size_t size,
			 pgprot_t prot, const void *caller)
{
	void *vaddr;

	vaddr = vmap(pages, size >> PAGE_SHIFT, VM_DMA_COHERENT, prot);
	if (vaddr)
		find_vm_area(vaddr)->pages = pages;
	return vaddr;
}

/*
 * Remaps an allocated contiguous region into another vm_area.
 * Cannot be used in non-sleeping contexts
 */
void *dma_common_contiguous_remap(struct page *page, size_t size,
			pgprot_t prot, const void *caller)
{
	int count = size >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	int i;

	pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);
	vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
	kfree(pages);

	return vaddr;
}

/*
 * Unmaps a range previously mapped by dma_common_*_remap
 */
void dma_common_free_remap(void *cpu_addr, size_t size)
{
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (!area || area->flags != VM_DMA_COHERENT) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	unmap_kernel_range((unsigned long)cpu_addr, PAGE_ALIGN(size));
	vunmap(cpu_addr);
}
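/*
 * Illustrative sketch, not part of the original file: one plausible way a
 * caller could pair dma_common_pages_remap() with dma_common_free_remap().
 * The helpers below (example_remap_pages, example_unmap_pages) are
 * hypothetical names used only to show the calling convention implied by the
 * definitions above; the block is kept inside #if 0 so it is never compiled.
 */
#if 0
static void *example_remap_pages(struct page **pages, size_t size)
{
	/*
	 * vmap() may sleep, so this must run in process context.  Note that
	 * dma_common_pages_remap() stashes the pages array in the vm_struct
	 * (see dma_common_find_pages()), so the array must stay valid for
	 * the lifetime of the mapping.
	 */
	return dma_common_pages_remap(pages, size, PAGE_KERNEL,
				      __builtin_return_address(0));
}

static void example_unmap_pages(void *vaddr, size_t size)
{
	/* Undo the mapping; the pages themselves are not freed here. */
	dma_common_free_remap(vaddr, size);
}
#endif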