dma-mapping fixes for 5.3-rc1

Merge tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "Fix various regressions:

  - force unencrypted dma-coherent buffers if encryption bit can't fit
    into the dma coherent mask (Tom Lendacky)

  - avoid limiting request size if swiotlb is not used (me)

  - fix swiotlb handling in dma_direct_sync_sg_for_cpu/device (Fugang Duan)"

* tag 'dma-mapping-5.3-1' of git://git.infradead.org/users/hch/dma-mapping:
  dma-direct: correct the physical addr in dma_direct_sync_sg_for_cpu/device
  dma-direct: only limit the mapping size if swiotlb could be used
  dma-mapping: add a dma_addressing_limited helper
  dma-direct: Force unencrypted DMA under SME for certain DMA masks
commit ac60602a6d
@@ -189,6 +189,7 @@ config S390
 	select VIRT_CPU_ACCOUNTING
 	select ARCH_HAS_SCALED_CPUTIME
 	select HAVE_NMI
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	select SWIOTLB
 	select GENERIC_ALLOCATOR
@@ -30,7 +30,7 @@
 #include <linux/export.h>
 #include <linux/cma.h>
 #include <linux/gfp.h>
-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <asm/processor.h>
 #include <linux/uaccess.h>
 #include <asm/pgtable.h>
@@ -161,6 +161,11 @@ bool sev_active(void)
 	return is_prot_virt_guest();
 }

+bool force_dma_unencrypted(struct device *dev)
+{
+	return sev_active();
+}
+
 /* protected virtualization */
 static void pv_init(void)
 {
@@ -1526,6 +1526,7 @@ config AMD_MEM_ENCRYPT
 	depends on X86_64 && CPU_SUP_AMD
 	select DYNAMIC_PHYSICAL_MASK
 	select ARCH_USE_MEMREMAP_PROT
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	---help---
 	  Say yes to enable support for the encryption of system memory.
 	  This requires an AMD processor that supports Secure Memory
@@ -15,6 +15,10 @@
 #include <linux/dma-direct.h>
 #include <linux/swiotlb.h>
 #include <linux/mem_encrypt.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/dma-mapping.h>

 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
@@ -348,6 +352,32 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL(sev_active);

+/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+bool force_dma_unencrypted(struct device *dev)
+{
+	/*
+	 * For SEV, all DMA must be to unencrypted addresses.
+	 */
+	if (sev_active())
+		return true;
+
+	/*
+	 * For SME, all DMA must be to unencrypted addresses if the
+	 * device does not support DMA to addresses that include the
+	 * encryption mask.
+	 */
+	if (sme_active()) {
+		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
+		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
+						dev->bus_dma_mask);
+
+		if (dma_dev_mask <= dma_enc_mask)
+			return true;
+	}
+
+	return false;
+}
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_free_decrypted_mem(void)
 {
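Illustrative aside (not part of the diff): the core of the new x86 check is a comparison between the device's usable DMA mask and the largest mask that still cannot carry the SME encryption bit. The following is a minimal user-space sketch of that arithmetic only; the encryption-bit position (47) and the device masks are invented values, and __builtin_ctzll() stands in for the kernel's __ffs64().

/*
 * User-space sketch only: mirrors the mask comparison above with invented
 * values.  The encryption bit is assumed to sit at physical address bit 47.
 */
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

int main(void)
{
	uint64_t sme_me_mask = 1ULL << 47;	/* hypothetical C-bit position */
	/* Largest mask that cannot represent the encryption bit. */
	uint64_t dma_enc_mask = DMA_BIT_MASK(__builtin_ctzll(sme_me_mask));
	uint64_t dev_masks[] = { DMA_BIT_MASK(24), DMA_BIT_MASK(32), DMA_BIT_MASK(48) };

	for (unsigned int i = 0; i < sizeof(dev_masks) / sizeof(dev_masks[0]); i++) {
		int forced = dev_masks[i] <= dma_enc_mask;

		printf("device mask %#16llx -> %s\n",
		       (unsigned long long)dev_masks[i],
		       forced ? "force unencrypted DMA" : "encrypted DMA is fine");
	}
	return 0;
}

With these numbers, any device mask of 47 bits or less forces unencrypted (decrypted or bounced) buffers, while a 48-bit or wider mask can express addresses that include the encryption bit.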
@@ -32,6 +32,15 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 }
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */

+#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
+bool force_dma_unencrypted(struct device *dev);
+#else
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+	return false;
+}
+#endif /* CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED */
+
 /*
  * If memory encryption is supported, phys_to_dma will set the memory encryption
  * bit in the DMA address, and dma_to_phys will clear it. The raw __phys_to_dma
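Illustrative aside (not part of the diff): the header uses the usual Kconfig-gated hook pattern, so generic DMA code can call force_dma_unencrypted() unconditionally while architectures that did not select ARCH_HAS_FORCE_DMA_UNENCRYPTED get a stub the compiler folds to false. A compact sketch of the pattern follows; the struct device and the caller are stand-ins invented for the example.

/*
 * Sketch of the "arch hook plus inline stub" pattern, compilable in user
 * space.  Build with -DCONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED to see the
 * prototype path; the default build uses the stub.
 */
#include <stdbool.h>
#include <stdio.h>

struct device { unsigned long long coherent_dma_mask; };	/* stand-in */

#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
/* An opted-in architecture provides the real implementation elsewhere. */
bool force_dma_unencrypted(struct device *dev);
#else
/* Everyone else gets a stub that the optimizer removes entirely. */
static inline bool force_dma_unencrypted(struct device *dev)
{
	(void)dev;
	return false;
}
#endif

/* Generic code never needs an #ifdef of its own. */
static void alloc_coherent_like(struct device *dev)
{
	if (force_dma_unencrypted(dev))
		printf("would clear the encryption bit for this buffer\n");
	else
		printf("buffer may stay encrypted\n");
}

int main(void)
{
	struct device dev = { .coherent_dma_mask = 0xffffffffULL };

	alloc_coherent_like(&dev);
	return 0;
}

An opted-in architecture supplies the real definition, exactly as the s390 and x86 hunks above do.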
@@ -679,6 +679,20 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
 	return dma_set_mask_and_coherent(dev, mask);
 }

+/**
+ * dma_addressing_limited - return if the device is addressing limited
+ * @dev: device to check
+ *
+ * Return %true if the devices DMA mask is too small to address all memory in
+ * the system, else %false. Lack of addressing bits is the prime reason for
+ * bounce buffering, but might not be the only one.
+ */
+static inline bool dma_addressing_limited(struct device *dev)
+{
+	return min_not_zero(*dev->dma_mask, dev->bus_dma_mask) <
+			dma_get_required_mask(dev);
+}
+
 #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		const struct iommu_ops *iommu, bool coherent);
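Illustrative aside (not part of the diff): dma_addressing_limited() simply compares the effective device mask against the mask needed to reach the top of RAM. A user-space sketch with made-up numbers follows; required_mask_for() is only a stand-in for dma_get_required_mask().

/*
 * User-space sketch of the dma_addressing_limited() comparison with invented
 * values: a 32-bit capable device on a machine whose highest RAM address
 * needs 34 bits is "addressing limited", a 64-bit capable one is not.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

/* Stand-in for dma_get_required_mask(): smallest mask covering top of RAM. */
static uint64_t required_mask_for(uint64_t highest_ram_addr)
{
	uint64_t mask = 0;

	while (mask < highest_ram_addr)
		mask = (mask << 1) | 1;
	return mask;
}

static bool addressing_limited(uint64_t dma_mask, uint64_t bus_dma_mask,
			       uint64_t required_mask)
{
	return min_not_zero(dma_mask, bus_dma_mask) < required_mask;
}

int main(void)
{
	uint64_t required = required_mask_for(12ULL << 30);	/* ~12 GiB of RAM */

	printf("32-bit device limited: %d\n",
	       addressing_limited(DMA_BIT_MASK(32), 0, required));
	printf("64-bit device limited: %d\n",
	       addressing_limited(~0ULL, 0, required));
	return 0;
}

On a machine needing 34 address bits, the 32-bit device is reported as limited and the 64-bit one is not; the last hunk below uses exactly this answer to decide whether swiotlb could ever bounce for the device.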
@@ -48,6 +48,9 @@ config ARCH_HAS_DMA_COHERENT_TO_PFN
 config ARCH_HAS_DMA_MMAP_PGPROT
 	bool

+config ARCH_HAS_FORCE_DMA_UNENCRYPTED
+	bool
+
 config DMA_NONCOHERENT_CACHE_SYNC
 	bool

@@ -23,14 +23,6 @@
 #define ARCH_ZONE_DMA_BITS 24
 #endif

-/*
- * For AMD SEV all DMA must be to unencrypted addresses.
- */
-static inline bool force_dma_unencrypted(void)
-{
-	return sev_active();
-}
-
 static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 {
 	if (!dev->dma_mask) {
@@ -46,7 +38,7 @@ static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
 static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 		phys_addr_t phys)
 {
-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		return __phys_to_dma(dev, phys);
 	return phys_to_dma(dev, phys);
 }
@@ -67,7 +59,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
 		dma_mask = dev->bus_dma_mask;

-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else
 		*phys_mask = dma_to_phys(dev, dma_mask);
@@ -159,7 +151,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	}

 	ret = page_address(page);
-	if (force_dma_unencrypted()) {
+	if (force_dma_unencrypted(dev)) {
 		set_memory_decrypted((unsigned long)ret, 1 << get_order(size));
 		*dma_handle = __phys_to_dma(dev, page_to_phys(page));
 	} else {
@@ -192,7 +184,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}

-	if (force_dma_unencrypted())
+	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);

 	if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
@@ -242,12 +234,14 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 	int i;

 	for_each_sg(sgl, sg, nents, i) {
-		if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
-			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length,
+		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
+
+		if (unlikely(is_swiotlb_buffer(paddr)))
+			swiotlb_tbl_sync_single(dev, paddr, sg->length,
 					dir, SYNC_FOR_DEVICE);

 		if (!dev_is_dma_coherent(dev))
-			arch_sync_dma_for_device(dev, sg_phys(sg), sg->length,
+			arch_sync_dma_for_device(dev, paddr, sg->length,
 					dir);
 	}
 }
@@ -279,11 +273,13 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 	int i;

 	for_each_sg(sgl, sg, nents, i) {
+		phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
+
 		if (!dev_is_dma_coherent(dev))
-			arch_sync_dma_for_cpu(dev, sg_phys(sg), sg->length, dir);
-
-		if (unlikely(is_swiotlb_buffer(sg_phys(sg))))
-			swiotlb_tbl_sync_single(dev, sg_phys(sg), sg->length, dir,
+			arch_sync_dma_for_cpu(dev, paddr, sg->length, dir);
+
+		if (unlikely(is_swiotlb_buffer(paddr)))
+			swiotlb_tbl_sync_single(dev, paddr, sg->length, dir,
 					SYNC_FOR_CPU);
 	}

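Illustrative aside (not part of the diff): the bug fixed in these two hunks is that, once swiotlb has bounced a scatterlist entry, sg_phys() still names the original pages while only the DMA address points into the bounce pool, so a check on sg_phys() never triggers the bounce-buffer sync. A toy model with invented addresses and an identity dma-to-phys translation:

/*
 * Toy user-space model of the sg_phys() vs dma_to_phys(sg_dma_address())
 * distinction.  The bounce-pool range, the addresses and the identity
 * translation are all made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

/* Pretend bounce pool occupying [0x8000000, 0x8100000). */
static const phys_addr_t io_tlb_start = 0x8000000, io_tlb_end = 0x8100000;

static bool is_swiotlb_buffer(phys_addr_t paddr)
{
	return paddr >= io_tlb_start && paddr < io_tlb_end;
}

/* Identity dma<->phys translation keeps the example simple. */
static phys_addr_t dma_to_phys(dma_addr_t dma) { return dma; }

struct toy_sg {
	phys_addr_t orig_phys;	/* what sg_phys(sg) reports */
	dma_addr_t dma_address;	/* what sg_dma_address(sg) reports */
};

int main(void)
{
	/* A buffer above the device's DMA limit, bounced into the pool. */
	struct toy_sg sg = {
		.orig_phys   = 0x140000000,	/* original pages, ~5 GiB */
		.dma_address = 0x8000800,	/* swiotlb slot given to the device */
	};

	printf("check sg_phys():             bounced? %d (sync silently skipped)\n",
	       is_swiotlb_buffer(sg.orig_phys));
	printf("check dma_to_phys(dma addr): bounced? %d (sync copies the bounce slot)\n",
	       is_swiotlb_buffer(dma_to_phys(sg.dma_address)));
	return 0;
}

Deriving the physical address from sg_dma_address(), as the new code does, makes both the swiotlb check and the cache maintenance operate on the address the device actually used.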
@@ -407,11 +403,9 @@ int dma_direct_supported(struct device *dev, u64 mask)

 size_t dma_direct_max_mapping_size(struct device *dev)
 {
-	size_t size = SIZE_MAX;
-
 	/* If SWIOTLB is active, use its maximum mapping size */
-	if (is_swiotlb_active())
-		size = swiotlb_max_mapping_size(dev);
-
-	return size;
+	if (is_swiotlb_active() &&
+	    (dma_addressing_limited(dev) || swiotlb_force == SWIOTLB_FORCE))
+		return swiotlb_max_mapping_size(dev);
+	return SIZE_MAX;
 }
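Illustrative aside (not part of the diff): the rewritten dma_direct_max_mapping_size() only caps the size when swiotlb is present and could actually be used for this device, that is when the device is addressing limited or swiotlb=force is set. A user-space sketch of that decision; the swiotlb state and the slot-based limit are stand-ins invented for the example.

/*
 * Sketch of the new size decision.  The 256 KiB figure is only a stand-in
 * for swiotlb_max_mapping_size(); the flags model kernel state.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum swiotlb_force_mode { SWIOTLB_NORMAL, SWIOTLB_FORCE };

static size_t max_mapping_size(bool swiotlb_active, bool addressing_limited,
			       enum swiotlb_force_mode force)
{
	/* Stand-in for swiotlb_max_mapping_size(): 128 slots * 2 KiB. */
	const size_t swiotlb_max = 128 * 2048;

	if (swiotlb_active && (addressing_limited || force == SWIOTLB_FORCE))
		return swiotlb_max;
	return SIZE_MAX;
}

static void show(const char *what, size_t val)
{
	if (val == SIZE_MAX)
		printf("%-40s -> no limit (SIZE_MAX)\n", what);
	else
		printf("%-40s -> %zu bytes\n", what, val);
}

int main(void)
{
	show("swiotlb present, 64-bit device",
	     max_mapping_size(true, false, SWIOTLB_NORMAL));
	show("swiotlb present, 32-bit device on big box",
	     max_mapping_size(true, true, SWIOTLB_NORMAL));
	show("swiotlb=force on the command line",
	     max_mapping_size(true, false, SWIOTLB_FORCE));
	return 0;
}

A fully capable device on a machine that merely has a swiotlb pool no longer sees its requests artificially limited, which was the regression being fixed.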