dma-mapping: remove the DMA_ATTR_WRITE_BARRIER flag
This flag is not implemented by any backend and only set by the
ib_umem module in a single instance.

Link: https://lore.kernel.org/r/20191113073214.9514-2-hch@lst.de
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
commit 7283fff8b5
parent 64c264872b
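For context: DMA attributes travel as a plain bit mask in the trailing
argument of the dma_map_*_attrs() helpers, and a bit only means something
if a backend actually tests it; DMA_ATTR_WRITE_BARRIER never was. A minimal
sketch of how an attribute is passed, with a made-up helper name and
DMA_ATTR_SKIP_CPU_SYNC standing in for an attribute that backends do honour:

#include <linux/dma-mapping.h>

/*
 * Hypothetical helper, for illustration only: the attribute mask is just
 * the last argument of the *_attrs mapping call.  Whether any given bit
 * has an effect depends entirely on the backend testing it.
 */
static dma_addr_t example_map_buf(struct device *dev, void *buf, size_t len)
{
	unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC;	/* example attribute */

	return dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE, attrs);
}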
--- a/Documentation/DMA-attributes.txt
+++ b/Documentation/DMA-attributes.txt
@@ -5,24 +5,6 @@ DMA attributes
 This document describes the semantics of the DMA attributes that are
 defined in linux/dma-mapping.h.
 
-DMA_ATTR_WRITE_BARRIER
-----------------------
-
-DMA_ATTR_WRITE_BARRIER is a (write) barrier attribute for DMA. DMA
-to a memory region with the DMA_ATTR_WRITE_BARRIER attribute forces
-all pending DMA writes to complete, and thus provides a mechanism to
-strictly order DMA from a device across all intervening busses and
-bridges. This barrier is not specific to a particular type of
-interconnect, it applies to the system as a whole, and so its
-implementation must account for the idiosyncrasies of the system all
-the way from the DMA device to memory.
-
-As an example of a situation where DMA_ATTR_WRITE_BARRIER would be
-useful, suppose that a device does a DMA write to indicate that data is
-ready and available in memory. The DMA of the "completion indication"
-could race with data DMA. Mapping the memory used for completion
-indications with DMA_ATTR_WRITE_BARRIER would prevent the race.
-
 DMA_ATTR_WEAK_ORDERING
 ----------------------
 
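The removed paragraphs describe mapping a device-written completion word with
the barrier attribute so the "data ready" write cannot overtake the data
writes themselves. A sketch of that (now removed) usage pattern, with
hypothetical names and built against a tree that still has the flag; as the
commit message notes, no backend ever acted on the bit, so this behaved
exactly like a plain dma_map_single():

#include <linux/dma-mapping.h>

/*
 * Hypothetical example of the pattern the removed text described:
 * map the completion word the device writes last with the (never
 * implemented) write-barrier attribute.
 */
static dma_addr_t map_completion_word(struct device *dev, u32 *done)
{
	return dma_map_single_attrs(dev, done, sizeof(*done),
				    DMA_FROM_DEVICE,
				    DMA_ATTR_WRITE_BARRIER);
}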
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -199,7 +199,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	struct mm_struct *mm;
 	unsigned long npages;
 	int ret;
-	unsigned long dma_attrs = 0;
 	struct scatterlist *sg;
 	unsigned int gup_flags = FOLL_WRITE;
 
@@ -211,9 +210,6 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 	if (!context)
 		return ERR_PTR(-EIO);
 
-	if (dmasync)
-		dma_attrs |= DMA_ATTR_WRITE_BARRIER;
-
 	/*
 	 * If the combination of the addr and size requested for this memory
 	 * region causes an integer overflow, return error.
@@ -294,11 +290,10 @@ struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
 
 	sg_mark_end(sg);
 
-	umem->nmap = ib_dma_map_sg_attrs(context->device,
+	umem->nmap = ib_dma_map_sg(context->device,
 				  umem->sg_head.sgl,
 				  umem->sg_nents,
-				  DMA_BIDIRECTIONAL,
-				  dma_attrs);
+				  DMA_BIDIRECTIONAL);
 
 	if (!umem->nmap) {
 		ret = -ENOMEM;
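The call-site change above is a straight simplification: with dma_attrs gone
the mask would always be zero, and ib_dma_map_sg() is just the attrs variant
with a zero mask. Roughly, paraphrasing the wrappers in include/rdma/ib_verbs.h
of that era (not the verbatim kernel source; exact bodies may differ between
kernel versions):

/* Paraphrase of the rdma mapping wrappers, for illustration only. */
static inline int ib_dma_map_sg_attrs(struct ib_device *dev,
				      struct scatterlist *sg, int nents,
				      enum dma_data_direction direction,
				      unsigned long dma_attrs)
{
	return dma_map_sg_attrs(dev->dma_device, sg, nents, direction,
				dma_attrs);
}

static inline int ib_dma_map_sg(struct ib_device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction direction)
{
	/* equivalent to the _attrs call with an empty attribute mask */
	return ib_dma_map_sg_attrs(dev, sg, nents, direction, 0);
}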
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -15,11 +15,8 @@
 /**
  * List of possible attributes associated with a DMA mapping. The semantics
  * of each attribute should be defined in Documentation/DMA-attributes.txt.
- *
- * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
- * forces all pending DMA writes to complete.
  */
-#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
+
 /*
  * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
  * may be weakly ordered, that is that reads and writes may pass each other.
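For comparison, an attribute that is implemented gets consumed by a test on
its bit somewhere in the mapping or unmapping paths. A hedged sketch with a
hypothetical function, loosely modelled on how DMA_ATTR_SKIP_CPU_SYNC is
handled; DMA_ATTR_WRITE_BARRIER never had such a test anywhere, which is why
it can be deleted without any behavioural change:

#include <linux/dma-mapping.h>

/* Hypothetical backend-style helper, for illustration only. */
static void example_unmap(struct device *dev, dma_addr_t addr, size_t size,
			  enum dma_data_direction dir, unsigned long attrs)
{
	/* An implemented attribute is honoured by testing its bit. */
	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
		dma_sync_single_for_cpu(dev, addr, size, dir);

	/* ... the rest of the unmap work would follow here ... */
}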