Merge commit 'kumar/kumar-dma'
commit 9f5494b797
@@ -16,9 +16,6 @@ struct dev_archdata {
/* DMA operations on that device */
struct dma_mapping_ops *dma_ops;
void *dma_data;

/* NUMA node if applicable */
int numa_node;
};

#endif /* _ASM_POWERPC_DEVICE_H */

@@ -44,8 +44,6 @@ extern void __dma_sync_page(struct page *page, unsigned long offset,

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64

static inline unsigned long device_to_mask(struct device *dev)
{
if (dev->dma_mask && *dev->dma_mask)
@@ -76,8 +74,24 @@ struct dma_mapping_ops {
struct dma_attrs *attrs);
int (*dma_supported)(struct device *dev, u64 mask);
int (*set_dma_mask)(struct device *dev, u64 dma_mask);
dma_addr_t (*map_page)(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs);
void (*unmap_page)(struct device *dev,
dma_addr_t dma_address, size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs);
};

/*
* Available generic sets of operations
*/
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
/* We don't handle the NULL dev case for ISA for now. We could
@@ -85,8 +99,19 @@ static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
* only ISA DMA device we support is the floppy and we have a hack
* in the floppy driver directly to get a device for us.
*/
if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))

if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
#ifdef CONFIG_PPC64
return NULL;
#else
/* Use default on 32-bit if dma_ops is not set up */
/* TODO: Long term, we should fix drivers so that dev and
* archdata dma_ops are set up for all buses.
*/
return &dma_direct_ops;
#endif
}

return dev->archdata.dma_ops;
}

@@ -123,6 +148,12 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}

/*
* TODO: map_/unmap_single will ideally go away, to be completely
* replaced by map/unmap_page. Until then, we allow dma_ops to have
* one or the other, or both by checking to see if the specific
* function requested exists; and if not, falling back on the other set.
*/
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
void *cpu_addr,
size_t size,
@@ -132,7 +163,14 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

BUG_ON(!dma_ops);
return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);

if (dma_ops->map_single)
return dma_ops->map_single(dev, cpu_addr, size, direction,
attrs);

return dma_ops->map_page(dev, virt_to_page(cpu_addr),
(unsigned long)cpu_addr % PAGE_SIZE, size,
direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
@@ -144,7 +182,13 @@ static inline void dma_unmap_single_attrs(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

BUG_ON(!dma_ops);
dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);

if (dma_ops->unmap_single) {
dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
return;
}

dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
@@ -156,8 +200,13 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

BUG_ON(!dma_ops);

if (dma_ops->map_page)
return dma_ops->map_page(dev, page, offset, size, direction,
attrs);

return dma_ops->map_single(dev, page_address(page) + offset, size,
direction, attrs);
direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
@@ -169,6 +218,12 @@ static inline void dma_unmap_page_attrs(struct device *dev,
struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

BUG_ON(!dma_ops);

if (dma_ops->unmap_page) {
dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
return;
}

dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
}

@@ -253,126 +308,6 @@ static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

/*
* Available generic sets of operations
*/
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask) (1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, mask))
return -EIO;

*dev->dma_mask = dma_mask;

return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t * dma_handle,
gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
return __dma_alloc_coherent(size, dma_handle, gfp);
#else
void *ret;
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
gfp |= GFP_DMA;

ret = (void *)__get_free_pages(gfp, get_order(size));

if (ret != NULL) {
memset(ret, 0, size);
*dma_handle = virt_to_bus(ret);
}

return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
__dma_free_coherent(size, vaddr);
#else
free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);

__dma_sync(ptr, size, direction);

return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction)
{
/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction)
{
BUG_ON(direction == DMA_NONE);

__dma_sync_page(page, offset, size, direction);

return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction)
{
/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
enum dma_data_direction direction)
{
struct scatterlist *sg;
int i;

BUG_ON(direction == DMA_NONE);

for_each_sg(sgl, sg, nents, i) {
BUG_ON(!sg_page(sg));
__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
}

return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
int nhwentries,
enum dma_data_direction direction)
{
/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
dma_addr_t dma_handle, size_t size,
enum dma_data_direction direction)

@@ -88,8 +88,6 @@ struct machdep_calls {
unsigned long (*tce_get)(struct iommu_table *tbl,
long index);
void (*tce_flush)(struct iommu_table *tbl);
void (*pci_dma_dev_setup)(struct pci_dev *dev);
void (*pci_dma_bus_setup)(struct pci_bus *bus);

void __iomem * (*ioremap)(phys_addr_t addr, unsigned long size,
unsigned long flags);
@@ -101,6 +99,9 @@ struct machdep_calls {
#endif
#endif /* CONFIG_PPC64 */

void (*pci_dma_dev_setup)(struct pci_dev *dev);
void (*pci_dma_bus_setup)(struct pci_bus *bus);

int (*probe)(void);
void (*setup_arch)(void); /* Optional, may be NULL */
void (*init_early)(void);

@@ -60,6 +60,14 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
return channel ? 15 : 14;
}

#ifdef CONFIG_PCI
extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
extern struct dma_mapping_ops *get_pci_dma_ops(void);
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL
#endif

#ifdef CONFIG_PPC64

/*
@@ -70,9 +78,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
#define PCI_DISABLE_MWI

#ifdef CONFIG_PCI
extern void set_pci_dma_ops(struct dma_mapping_ops *dma_ops);
extern struct dma_mapping_ops *get_pci_dma_ops(void);

static inline void pci_dma_burst_advice(struct pci_dev *pdev,
enum pci_dma_burst_strategy *strat,
unsigned long *strategy_parameter)
@@ -89,9 +94,6 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
*strat = PCI_DMA_BURST_MULTIPLE;
*strategy_parameter = cacheline_size;
}
#else /* CONFIG_PCI */
#define set_pci_dma_ops(d)
#define get_pci_dma_ops() NULL
#endif

#else /* 32-bit */

@@ -55,7 +55,7 @@ typedef u64 phys_addr_t;
typedef u32 phys_addr_t;
#endif

#ifdef __powerpc64__
#if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;

@@ -70,10 +70,10 @@ extra-$(CONFIG_8xx) := head_8xx.o
extra-y += vmlinux.lds

obj-y += time.o prom.o traps.o setup-common.o \
udbg.o misc.o io.o \
udbg.o misc.o io.o dma.o \
misc_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
obj-$(CONFIG_PPC64) += dma_64.o iommu.o
obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o
obj-$(CONFIG_MODULES) += ppc_ksyms.o

@@ -2,14 +2,10 @@
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
*
* Provide default implementations of the DMA mapping callbacks for
* directly mapped busses and busses using the iommu infrastructure
* busses using the iommu infrastructure
*/

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/iommu.h>
#include <asm/abs_addr.h>

/*
* Generic iommu implementation
@@ -24,7 +20,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size,
{
return iommu_alloc_coherent(dev, dev->archdata.dma_data, size,
dma_handle, device_to_mask(dev), flag,
dev->archdata.numa_node);
dev_to_node(dev));
}

static void dma_iommu_free_coherent(struct device *dev, size_t size,
@@ -105,96 +101,3 @@ struct dma_mapping_ops dma_iommu_ops = {
.dma_supported = dma_iommu_dma_supported,
};
EXPORT_SYMBOL(dma_iommu_ops);

/*
* Generic direct DMA implementation
*
* This implementation supports a per-device offset that can be applied if
* the address at which memory is visible to devices is not 0. Platform code
* can set archdata.dma_data to an unsigned long holding the offset. By
* default the offset is zero.
*/

static unsigned long get_dma_direct_offset(struct device *dev)
{
return (unsigned long)dev->archdata.dma_data;
}

static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
struct page *page;
void *ret;
int node = dev->archdata.numa_node;

page = alloc_pages_node(node, flag, get_order(size));
if (page == NULL)
return NULL;
ret = page_address(page);
memset(ret, 0, size);
*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

return ret;
}

static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
free_pages((unsigned long)vaddr, get_order(size));
}

static dma_addr_t dma_direct_map_single(struct device *dev, void *ptr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
return virt_to_abs(ptr) + get_dma_direct_offset(dev);
}

static void dma_direct_unmap_single(struct device *dev, dma_addr_t dma_addr,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;

for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
sg->dma_length = sg->length;
}

return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
/* Could be improved to check for memory though it better be
* done via some global so platforms can set the limit in case
* they have limited DMA windows
*/
return mask >= DMA_32BIT_MASK;
}

struct dma_mapping_ops dma_direct_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.map_single = dma_direct_map_single,
.unmap_single = dma_direct_unmap_single,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
};
EXPORT_SYMBOL(dma_direct_ops);

arch/powerpc/kernel/dma.c (new file, 127 lines)
@@ -0,0 +1,127 @@
/*
* Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
*
* Provide default implementations of the DMA mapping callbacks for
* directly mapped busses.
*/

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <asm/bug.h>
#include <asm/abs_addr.h>

/*
* Generic direct DMA implementation
*
* This implementation supports a per-device offset that can be applied if
* the address at which memory is visible to devices is not 0. Platform code
* can set archdata.dma_data to an unsigned long holding the offset. By
* default the offset is PCI_DRAM_OFFSET.
*/

static unsigned long get_dma_direct_offset(struct device *dev)
{
if (dev)
return (unsigned long)dev->archdata.dma_data;

return PCI_DRAM_OFFSET;
}

void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
return __dma_alloc_coherent(size, dma_handle, flag);
#else
struct page *page;
void *ret;
int node = dev_to_node(dev);

/* ignore region specifiers */
flag &= ~(__GFP_HIGHMEM);

page = alloc_pages_node(node, flag, get_order(size));
if (page == NULL)
return NULL;
ret = page_address(page);
memset(ret, 0, size);
*dma_handle = virt_to_abs(ret) + get_dma_direct_offset(dev);

return ret;
#endif
}

void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
__dma_free_coherent(size, vaddr);
#else
free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
struct scatterlist *sg;
int i;

for_each_sg(sgl, sg, nents, i) {
sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
sg->dma_length = sg->length;
}

return nents;
}

static void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction direction,
struct dma_attrs *attrs)
{
}

static int dma_direct_dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PPC64
/* Could be improved to check for memory though it better be
* done via some global so platforms can set the limit in case
* they have limited DMA windows
*/
return mask >= DMA_32BIT_MASK;
#else
return 1;
#endif
}

static inline dma_addr_t dma_direct_map_page(struct device *dev,
struct page *page,
unsigned long offset,
size_t size,
enum dma_data_direction dir,
struct dma_attrs *attrs)
{
BUG_ON(dir == DMA_NONE);
__dma_sync_page(page, offset, size, dir);
return page_to_phys(page) + offset + get_dma_direct_offset(dev);
}

static inline void dma_direct_unmap_page(struct device *dev,
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
struct dma_attrs *attrs)
{
}

struct dma_mapping_ops dma_direct_ops = {
.alloc_coherent = dma_direct_alloc_coherent,
.free_coherent = dma_direct_free_coherent,
.map_sg = dma_direct_map_sg,
.unmap_sg = dma_direct_unmap_sg,
.dma_supported = dma_direct_dma_supported,
.map_page = dma_direct_map_page,
.unmap_page = dma_direct_unmap_page,
};
EXPORT_SYMBOL(dma_direct_ops);
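
As a rough illustration of how the new direct ops are meant to be wired up (a sketch of mine, not part of this commit): bus or platform setup code points archdata.dma_ops at dma_direct_ops and, where bus-visible RAM does not start at 0, stashes the offset in archdata.dma_data, much as pcibios_setup_new_device() does for PCI later in this diff. my_bus_dma_dev_setup() and MY_BUS_DRAM_OFFSET are hypothetical names used only for the example.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical bus-visible RAM base; a real platform would use its own value. */
#define MY_BUS_DRAM_OFFSET 0x80000000ul

static void my_bus_dma_dev_setup(struct device *dev)
{
	/* Route all DMA mapping calls for this device to the direct ops */
	dev->archdata.dma_ops = &dma_direct_ops;
	/* Per-device offset picked up by get_dma_direct_offset() */
	dev->archdata.dma_data = (void *)MY_BUS_DRAM_OFFSET;
}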

@@ -78,7 +78,7 @@ struct of_device *of_device_alloc(struct device_node *np,
dev->dev.parent = parent;
dev->dev.release = of_release_dev;
dev->dev.archdata.of_node = np;
dev->dev.archdata.numa_node = of_node_to_nid(np);
set_dev_node(&dev->dev, of_node_to_nid(np));

if (bus_id)
strlcpy(dev->dev.bus_id, bus_id, BUS_ID_SIZE);

@@ -56,6 +56,34 @@ resource_size_t isa_mem_base;
/* Default PCI flags is 0 */
unsigned int ppc_pci_flags;

static struct dma_mapping_ops *pci_dma_ops;

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
return dma_set_mask(&dev->dev, mask);
}

int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
int rc;

rc = dma_set_mask(&dev->dev, mask);
dev->dev.coherent_dma_mask = dev->dma_mask;

return rc;
}
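
For reference, a PCI driver would typically exercise these helpers from its probe routine roughly as follows (a minimal sketch, not taken from this commit; my_driver_probe() is a hypothetical function, and both calls end up in dma_set_mask() via the wrappers above):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int my_driver_probe(struct pci_dev *pdev)
{
	/* Declare that the device can only address 32 bits of DMA space */
	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;
	/* Keep the coherent (consistent) mask in step with the streaming mask */
	if (pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK))
		return -EIO;
	return 0;
}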

struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
struct pci_controller *phb;
@@ -180,6 +208,26 @@ char __devinit *pcibios_setup(char *str)
return str;
}

void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
struct dev_archdata *sd = &dev->dev.archdata;

sd->of_node = pci_device_to_OF_node(dev);

DBG("PCI: device %s OF node: %s\n", pci_name(dev),
sd->of_node ? sd->of_node->full_name : "<none>");

sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_PPC32
sd->dma_data = (void *)PCI_DRAM_OFFSET;
#endif
set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

if (ppc_md.pci_dma_dev_setup)
ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

/*
* Reads the interrupt pin to determine if interrupt is use by card.
* If the interrupt is used, then gets the interrupt line from the

@@ -424,6 +424,7 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
unsigned long io_offset;
struct resource *res;
int i;
struct pci_dev *dev;

/* Hookup PHB resources */
io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
@@ -457,6 +458,12 @@ void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
bus->resource[i+1] = res;
}
}

if (ppc_md.pci_dma_bus_setup)
ppc_md.pci_dma_bus_setup(bus);

list_for_each_entry(dev, &bus->devices, bus_list)
pcibios_setup_new_device(dev);
}

/* the next one is stolen from the alpha port... */

@@ -52,35 +52,6 @@ EXPORT_SYMBOL(pci_io_base);

LIST_HEAD(hose_list);

static struct dma_mapping_ops *pci_dma_ops;

void set_pci_dma_ops(struct dma_mapping_ops *dma_ops)
{
pci_dma_ops = dma_ops;
}

struct dma_mapping_ops *get_pci_dma_ops(void)
{
return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
return dma_set_mask(&dev->dev, mask);
}

int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
int rc;

rc = dma_set_mask(&dev->dev, mask);
dev->dev.coherent_dma_mask = dev->dma_mask;

return rc;
}

static void fixup_broken_pcnet32(struct pci_dev* dev)
{
if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
@@ -548,26 +519,6 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
}
EXPORT_SYMBOL_GPL(pcibios_map_io_space);

void __devinit pcibios_setup_new_device(struct pci_dev *dev)
{
struct dev_archdata *sd = &dev->dev.archdata;

sd->of_node = pci_device_to_OF_node(dev);

DBG("PCI: device %s OF node: %s\n", pci_name(dev),
sd->of_node ? sd->of_node->full_name : "<none>");

sd->dma_ops = pci_dma_ops;
#ifdef CONFIG_NUMA
sd->numa_node = pcibus_to_node(dev->bus);
#else
sd->numa_node = -1;
#endif
if (ppc_md.pci_dma_dev_setup)
ppc_md.pci_dma_dev_setup(dev);
}
EXPORT_SYMBOL(pcibios_setup_new_device);

void __devinit pcibios_do_bus_setup(struct pci_bus *bus)
{
struct pci_dev *dev;

@@ -1232,7 +1232,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
else
viodev->dev.archdata.dma_ops = &dma_iommu_ops;
viodev->dev.archdata.dma_data = vio_build_iommu_table(viodev);
viodev->dev.archdata.numa_node = of_node_to_nid(of_node);
set_dev_node(&viodev->dev, of_node_to_nid(of_node));

/* init generic 'struct device' fields: */
viodev->dev.parent = &vio_bus_device.dev;

@@ -556,11 +556,11 @@ static struct iommu_table *cell_get_iommu_table(struct device *dev)
* node's iommu. We -might- do something smarter later though it may
* never be necessary
*/
iommu = cell_iommu_for_node(archdata->numa_node);
iommu = cell_iommu_for_node(dev_to_node(dev));
if (iommu == NULL || list_empty(&iommu->windows)) {
printk(KERN_ERR "iommu: missing iommu for %s (node %d)\n",
archdata->of_node ? archdata->of_node->full_name : "?",
archdata->numa_node);
dev_to_node(dev));
return NULL;
}
window = list_entry(iommu->windows.next, struct iommu_window, list);
@@ -577,7 +577,7 @@ static void *dma_fixed_alloc_coherent(struct device *dev, size_t size,
return iommu_alloc_coherent(dev, cell_get_iommu_table(dev),
size, dma_handle,
device_to_mask(dev), flag,
dev->archdata.numa_node);
dev_to_node(dev));
else
return dma_direct_ops.alloc_coherent(dev, size, dma_handle,
flag);

@@ -762,7 +762,7 @@ int ps3_system_bus_device_register(struct ps3_system_bus_device *dev)
};

dev->core.archdata.of_node = NULL;
dev->core.archdata.numa_node = 0;
set_dev_node(&dev->core, 0);

pr_debug("%s:%d add %s\n", __func__, __LINE__, dev->core.bus_id);