[SPARC64]: Beginnings of SUN4V PCI controller support.
Abstract out IOMMU operations so that we can have a different set of calls on sun4v, which needs to do things through hypervisor calls. Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
4cce4b7cc5
commit
8f6a93a196
|
@ -14,7 +14,7 @@ obj-y := process.o setup.o cpu.o idprom.o \
|
|||
power.o sbus.o iommu_common.o sparc64_ksyms.o chmc.o
|
||||
|
||||
obj-$(CONFIG_PCI) += ebus.o isa.o pci_common.o pci_iommu.o \
|
||||
pci_psycho.o pci_sabre.o pci_schizo.o
|
||||
pci_psycho.o pci_sabre.o pci_schizo.o pci_sun4v.o
|
||||
obj-$(CONFIG_SMP) += smp.o trampoline.o
|
||||
obj-$(CONFIG_SPARC32_COMPAT) += sys32.o sys_sparc32.o signal32.o
|
||||
obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
|
||||
|
|
|
@ -188,6 +188,7 @@ extern void psycho_init(int, char *);
|
|||
extern void schizo_init(int, char *);
|
||||
extern void schizo_plus_init(int, char *);
|
||||
extern void tomatillo_init(int, char *);
|
||||
extern void sun4v_pci_init(int, char *);
|
||||
|
||||
static struct {
|
||||
char *model_name;
|
||||
|
@ -204,6 +205,7 @@ static struct {
|
|||
{ "pci108e,8002", schizo_plus_init },
|
||||
{ "SUNW,tomatillo", tomatillo_init },
|
||||
{ "pci108e,a801", tomatillo_init },
|
||||
{ "SUNW,sun4v-pci", sun4v_pci_init },
|
||||
};
|
||||
#define PCI_NUM_CONTROLLER_TYPES (sizeof(pci_controller_table) / \
|
||||
sizeof(pci_controller_table[0]))
|
||||
|
@ -283,6 +285,12 @@ int __init pcic_present(void)
|
|||
return pci_controller_scan(pci_is_controller);
|
||||
}
|
||||
|
||||
struct pci_iommu_ops *pci_iommu_ops;
|
||||
EXPORT_SYMBOL(pci_iommu_ops);
|
||||
|
||||
extern struct pci_iommu_ops pci_sun4u_iommu_ops,
|
||||
pci_sun4v_iommu_ops;
|
||||
|
||||
/* Find each controller in the system, attach and initialize
|
||||
* software state structure for each and link into the
|
||||
* pci_controller_root. Setup the controller enough such
|
||||
|
@ -290,6 +298,11 @@ int __init pcic_present(void)
|
|||
*/
|
||||
static void __init pci_controller_probe(void)
|
||||
{
|
||||
if (tlb_type == hypervisor)
|
||||
pci_iommu_ops = &pci_sun4v_iommu_ops;
|
||||
else
|
||||
pci_iommu_ops = &pci_sun4u_iommu_ops;
|
||||
|
||||
printk("PCI: Probing for controllers.\n");
|
||||
|
||||
pci_controller_scan(pci_controller_init);
|
||||
|
|
|
@ -219,7 +219,7 @@ static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
|
|||
* DMA for PCI device PDEV. Return non-NULL cpu-side address if
|
||||
* successful and set *DMA_ADDRP to the PCI side dma address.
|
||||
*/
|
||||
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
|
||||
static void *pci_4u_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -267,7 +267,7 @@ void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_ad
|
|||
}
|
||||
|
||||
/* Free and unmap a consistent DMA translation. */
|
||||
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
|
||||
static void pci_4u_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -294,7 +294,7 @@ void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_
|
|||
/* Map a single buffer at PTR of SZ bytes for PCI DMA
|
||||
* in streaming mode.
|
||||
*/
|
||||
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
|
||||
static dma_addr_t pci_4u_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -415,7 +415,7 @@ static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu,
|
|||
}
|
||||
|
||||
/* Unmap a single streaming mode DMA translation. */
|
||||
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
|
||||
static void pci_4u_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -548,7 +548,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
|
|||
* When making changes here, inspect the assembly output. I was having
|
||||
* hard time to keep this routine out of using stack slots for holding variables.
|
||||
*/
|
||||
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
static int pci_4u_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -635,7 +635,7 @@ int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int
|
|||
}
|
||||
|
||||
/* Unmap a set of streaming mode DMA translations. */
|
||||
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
static void pci_4u_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -695,7 +695,7 @@ void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems,
|
|||
/* Make physical memory consistent for a single
|
||||
* streaming mode DMA translation after a transfer.
|
||||
*/
|
||||
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
|
||||
static void pci_4u_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -735,7 +735,7 @@ void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size
|
|||
/* Make physical memory consistent for a set of streaming
|
||||
* mode DMA translations after a transfer.
|
||||
*/
|
||||
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
static void pci_4u_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
{
|
||||
struct pcidev_cookie *pcp;
|
||||
struct pci_iommu *iommu;
|
||||
|
@ -776,6 +776,17 @@ void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, i
|
|||
spin_unlock_irqrestore(&iommu->lock, flags);
|
||||
}
|
||||
|
||||
struct pci_iommu_ops pci_sun4u_iommu_ops = {
|
||||
.alloc_consistent = pci_4u_alloc_consistent,
|
||||
.free_consistent = pci_4u_free_consistent,
|
||||
.map_single = pci_4u_map_single,
|
||||
.unmap_single = pci_4u_unmap_single,
|
||||
.map_sg = pci_4u_map_sg,
|
||||
.unmap_sg = pci_4u_unmap_sg,
|
||||
.dma_sync_single_for_cpu = pci_4u_dma_sync_single_for_cpu,
|
||||
.dma_sync_sg_for_cpu = pci_4u_dma_sync_sg_for_cpu,
|
||||
};
|
||||
|
||||
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
|
||||
{
|
||||
struct pci_dev *ali_isa_bridge;
|
||||
|
|
74
arch/sparc64/kernel/pci_sun4v.c
Normal file
74
arch/sparc64/kernel/pci_sun4v.c
Normal file
|
@ -0,0 +1,74 @@
|
|||
/* pci_sun4v.c: SUN4V specific PCI controller support.
|
||||
*
|
||||
* Copyright (C) 2006 David S. Miller (davem@davemloft.net)
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/pci.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/interrupt.h>
|
||||
|
||||
#include <asm/pbm.h>
|
||||
#include <asm/iommu.h>
|
||||
#include <asm/irq.h>
|
||||
#include <asm/upa.h>
|
||||
#include <asm/pstate.h>
|
||||
#include <asm/oplib.h>
|
||||
#include <asm/hypervisor.h>
|
||||
|
||||
#include "pci_impl.h"
|
||||
#include "iommu_common.h"
|
||||
|
||||
/* sun4v stub: consistent-DMA allocation will eventually go through
 * hypervisor calls (see commit message); until then every request
 * fails by returning NULL.
 */
static void *pci_4v_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
|
||||
{
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* sun4v stub: nothing is ever allocated by pci_4v_alloc_consistent,
 * so there is nothing to free yet.
 */
static void pci_4v_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
|
||||
{
|
||||
}
|
||||
|
||||
/* sun4v stub: no IOMMU programming yet; returns 0 unconditionally.
 * NOTE(review): 0 is used here as the "no mapping" placeholder --
 * callers get no valid DMA address until this is implemented.
 */
static dma_addr_t pci_4v_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* sun4v stub: no mappings are created yet, so unmap is a no-op. */
static void pci_4v_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
|
||||
{
|
||||
}
|
||||
|
||||
/* sun4v stub: claims success by echoing back nelems without actually
 * programming any IOMMU entries for the scatterlist.
 */
static int pci_4v_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
{
|
||||
return nelems;
|
||||
}
|
||||
|
||||
/* sun4v stub: scatterlist unmap is a no-op until map_sg does real work. */
static void pci_4v_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
{
|
||||
}
|
||||
|
||||
/* sun4v stub: no streaming-buffer flush is performed yet. */
static void pci_4v_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
|
||||
{
|
||||
}
|
||||
|
||||
/* sun4v stub: scatterlist variant of the sync no-op above. */
static void pci_4v_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
|
||||
{
|
||||
}
|
||||
|
||||
/* sun4v dispatch table installed by pci_controller_probe() when
 * tlb_type == hypervisor; every entry is currently a stub.
 */
struct pci_iommu_ops pci_sun4v_iommu_ops = {
|
||||
.alloc_consistent = pci_4v_alloc_consistent,
|
||||
.free_consistent = pci_4v_free_consistent,
|
||||
.map_single = pci_4v_map_single,
|
||||
.unmap_single = pci_4v_unmap_single,
|
||||
.map_sg = pci_4v_map_sg,
|
||||
.unmap_sg = pci_4v_unmap_sg,
|
||||
.dma_sync_single_for_cpu = pci_4v_dma_sync_single_for_cpu,
|
||||
.dma_sync_sg_for_cpu = pci_4v_dma_sync_sg_for_cpu,
|
||||
};
|
||||
|
||||
/* Controller-table entry point for "SUNW,sun4v-pci" nodes.  Still
 * unimplemented: prints a diagnostic via the PROM and halts the
 * machine rather than continuing with a half-initialized controller.
 */
void sun4v_pci_init(int node, char *model_name)
|
||||
{
|
||||
prom_printf("sun4v_pci_init: Implement me.\n");
|
||||
prom_halt();
|
||||
}
|
|
@ -41,10 +41,26 @@ static inline void pcibios_penalize_isa_irq(int irq, int active)
|
|||
|
||||
struct pci_dev;
|
||||
|
||||
/* Indirection layer for PCI IOMMU DMA operations.  Per the commit
 * message, sun4u programs the IOMMU hardware directly while sun4v
 * must route everything through hypervisor calls; the active table
 * is selected at boot and reached via the pci_iommu_ops pointer.
 */
struct pci_iommu_ops {
|
||||
void *(*alloc_consistent)(struct pci_dev *, size_t, dma_addr_t *);
|
||||
void (*free_consistent)(struct pci_dev *, size_t, void *, dma_addr_t);
|
||||
dma_addr_t (*map_single)(struct pci_dev *, void *, size_t, int);
|
||||
void (*unmap_single)(struct pci_dev *, dma_addr_t, size_t, int);
|
||||
int (*map_sg)(struct pci_dev *, struct scatterlist *, int, int);
|
||||
void (*unmap_sg)(struct pci_dev *, struct scatterlist *, int, int);
|
||||
void (*dma_sync_single_for_cpu)(struct pci_dev *, dma_addr_t, size_t, int);
|
||||
void (*dma_sync_sg_for_cpu)(struct pci_dev *, struct scatterlist *, int, int);
|
||||
};
|
||||
|
||||
extern struct pci_iommu_ops *pci_iommu_ops;
|
||||
|
||||
/* Allocate and map kernel buffer using consistent mode DMA for a device.
|
||||
* hwdev should be valid struct pci_dev pointer for PCI devices.
|
||||
*/
|
||||
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle);
|
||||
/* Dispatch consistent-DMA allocation to the IOMMU implementation
 * selected at boot (pci_iommu_ops is set in pci_controller_probe()).
 */
static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle)
|
||||
{
|
||||
return pci_iommu_ops->alloc_consistent(hwdev, size, dma_handle);
|
||||
}
|
||||
|
||||
/* Free and unmap a consistent DMA buffer.
|
||||
* cpu_addr is what was returned from pci_alloc_consistent,
|
||||
|
@ -54,7 +70,10 @@ extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t
|
|||
* References to the memory and mappings associated with cpu_addr/dma_addr
|
||||
* past this call are illegal.
|
||||
*/
|
||||
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
|
||||
/* Dispatch consistent-DMA free to the boot-selected IOMMU ops table.
 *
 * Fix: free_consistent returns void (see struct pci_iommu_ops), and a
 * `return expr;` statement is not permitted in a function returning
 * void (C99 6.8.6.4) -- call the hook as a plain statement instead.
 */
static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle)
|
||||
{
|
||||
pci_iommu_ops->free_consistent(hwdev, size, vaddr, dma_handle);
|
||||
}
|
||||
|
||||
/* Map a single buffer of the indicated size for DMA in streaming mode.
|
||||
* The 32-bit bus address to use is returned.
|
||||
|
@ -62,7 +81,10 @@ extern void pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr,
|
|||
* Once the device is given the dma address, the device owns this memory
|
||||
* until either pci_unmap_single or pci_dma_sync_single_for_cpu is performed.
|
||||
*/
|
||||
extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction);
|
||||
/* Dispatch streaming-mode single-buffer map through pci_iommu_ops. */
static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction)
|
||||
{
|
||||
return pci_iommu_ops->map_single(hwdev, ptr, size, direction);
|
||||
}
|
||||
|
||||
/* Unmap a single streaming mode DMA translation. The dma_addr and size
|
||||
* must match what was provided for in a previous pci_map_single call. All
|
||||
|
@ -71,7 +93,10 @@ extern dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size,
|
|||
* After this call, reads by the cpu to the buffer are guaranteed to see
|
||||
* whatever the device wrote there.
|
||||
*/
|
||||
extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction);
|
||||
/* Dispatch streaming-mode single-buffer unmap through pci_iommu_ops. */
static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction)
|
||||
{
|
||||
pci_iommu_ops->unmap_single(hwdev, dma_addr, size, direction);
|
||||
}
|
||||
|
||||
/* No highmem on sparc64, plus we have an IOMMU, so mapping pages is easy. */
|
||||
#define pci_map_page(dev, page, off, size, dir) \
|
||||
|
@ -107,15 +132,19 @@ extern void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t
|
|||
* Device ownership issues as mentioned above for pci_map_single are
|
||||
* the same here.
|
||||
*/
|
||||
extern int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
|
||||
int nents, int direction);
|
||||
/* Dispatch scatterlist map through pci_iommu_ops; returns the number
 * of DMA segments actually used (may be less than nents after merging).
 */
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
|
||||
{
|
||||
return pci_iommu_ops->map_sg(hwdev, sg, nents, direction);
|
||||
}
|
||||
|
||||
/* Unmap a set of streaming mode DMA translations.
|
||||
* Again, cpu read rules concerning calls here are the same as for
|
||||
* pci_unmap_single() above.
|
||||
*/
|
||||
extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
|
||||
int nhwents, int direction);
|
||||
/* Dispatch scatterlist unmap through pci_iommu_ops. */
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nhwents, int direction)
|
||||
{
|
||||
pci_iommu_ops->unmap_sg(hwdev, sg, nhwents, direction);
|
||||
}
|
||||
|
||||
/* Make physical memory consistent for a single
|
||||
* streaming mode DMA translation after a transfer.
|
||||
|
@ -127,8 +156,10 @@ extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
|
|||
* must first perform a pci_dma_sync_for_device, and then the
|
||||
* device again owns the buffer.
|
||||
*/
|
||||
extern void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle,
|
||||
size_t size, int direction);
|
||||
/* Dispatch single-buffer CPU-side DMA sync through pci_iommu_ops. */
static inline void pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, size_t size, int direction)
|
||||
{
|
||||
pci_iommu_ops->dma_sync_single_for_cpu(hwdev, dma_handle, size, direction);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
|
||||
|
@ -144,7 +175,10 @@ pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle,
|
|||
* The same as pci_dma_sync_single_* but for a scatter-gather list,
|
||||
* same rules and usage.
|
||||
*/
|
||||
extern void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction);
|
||||
/* Dispatch scatterlist CPU-side DMA sync through pci_iommu_ops. */
static inline void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nelems, int direction)
|
||||
{
|
||||
pci_iommu_ops->dma_sync_sg_for_cpu(hwdev, sg, nelems, direction);
|
||||
}
|
||||
|
||||
static inline void
|
||||
pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
|
||||
|
|
Loading…
Reference in New Issue
Block a user