Merge branch 'spi-5.4' into spi-next

commit b769c5ba8a

Documentation/devicetree/bindings/spi/nuvoton,npcm-fiu.txt (new file, 47 lines)
@@ -0,0 +1,47 @@
* Nuvoton FLASH Interface Unit (FIU) SPI Controller

NPCM FIU supports single, dual and quad communication interfaces.

The NPCM7XX supports three FIU modules:
FIU0 and FIUx support two chip selects,
FIU3 supports four chip selects.

Required properties:
 - compatible : "nuvoton,npcm750-fiu" for the NPCM7XX BMC
 - #address-cells : should be 1.
 - #size-cells : should be 0.
 - reg : the first contains the register location and length,
         the second contains the memory mapping address and length
 - reg-names: Should contain the reg names "control" and "memory"
 - clocks : phandle of FIU reference clock.

Required properties in case the pins can be muxed:
 - pinctrl-names : a pinctrl state named "default" must be defined.
 - pinctrl-0 : phandle referencing pin configuration of the device.

Optional property:
 - nuvoton,spix-mode: enable spix-mode for an expansion bus to an ASIC or CPLD.

Aliases:
- All the FIU controller nodes should be represented in the aliases node using
  the following format 'fiu{n}' where n is a unique number for the alias.
  In the NPCM7XX BMC:
        fiu0 represents the FIU0 controller
        fiu1 represents the FIU3 controller
        fiu2 represents the FIUx controller

Example:
fiu3: spi@c00000000 {
    compatible = "nuvoton,npcm750-fiu";
    #address-cells = <1>;
    #size-cells = <0>;
    reg = <0xfb000000 0x1000>, <0x80000000 0x10000000>;
    reg-names = "control", "memory";
    clocks = <&clk NPCM7XX_CLK_AHB>;
    pinctrl-names = "default";
    pinctrl-0 = <&spi3_pins>;
    spi-nor@0 {
        ...
    };
};
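The 'fiu{n}' alias above is typically resolved by the controller driver at probe time with of_alias_get_id(); a minimal sketch of that lookup (illustrative only, not code from this commit):

#include <linux/device.h>
#include <linux/of.h>

/* Sketch: map this controller node to its "fiu{n}" alias number. */
static int npcm_fiu_get_alias_id(struct device *dev)
{
    int id = of_alias_get_id(dev->of_node, "fiu");

    if (id < 0)
        dev_warn(dev, "no 'fiu' alias for node: %d\n", id);
    return id;
}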
@ -3,9 +3,8 @@
|
|||
Required properties:
|
||||
- compatible : Should be "fsl,vf610-qspi", "fsl,imx6sx-qspi",
|
||||
"fsl,imx7d-qspi", "fsl,imx6ul-qspi",
|
||||
"fsl,ls1021a-qspi"
|
||||
"fsl,ls1021a-qspi", "fsl,ls2080a-qspi"
|
||||
or
|
||||
"fsl,ls2080a-qspi" followed by "fsl,ls1021a-qspi",
|
||||
"fsl,ls1043a-qspi" followed by "fsl,ls1021a-qspi"
|
||||
- reg : the first contains the register location and length,
|
||||
the second contains the memory mapping address and length
|
||||
|
@ -34,7 +33,11 @@ qspi0: quadspi@40044000 {
|
|||
clock-names = "qspi_en", "qspi";
|
||||
|
||||
flash0: s25fl128s@0 {
|
||||
....
|
||||
#address-cells = <1>;
|
||||
#size-cells = <1>;
|
||||
compatible = "spansion,s25fl128s", "jedec,spi-nor";
|
||||
spi-max-frequency = <50000000>;
|
||||
reg = <0>;
|
||||
};
|
||||
};
|
||||
|
||||
|
|
|
@@ -5,6 +5,7 @@ Required properties:
    - mediatek,mt2701-spi: for mt2701 platforms
    - mediatek,mt2712-spi: for mt2712 platforms
    - mediatek,mt6589-spi: for mt6589 platforms
    - mediatek,mt6765-spi: for mt6765 platforms
    - mediatek,mt7622-spi: for mt7622 platforms
    - "mediatek,mt7629-spi", "mediatek,mt7622-spi": for mt7629 platforms
    - mediatek,mt8135-spi: for mt8135 platforms
@@ -25,18 +25,23 @@ data by ADI software channels at the same time, or two parallel routine of setti
ADI registers will make ADI controller registers chaos to lead incorrect results.
Then we need one hardware spinlock to synchronize between the multiple subsystems.

The new version ADI controller supplies multiple master channels for different
subsystem accessing, that means no need to add hardware spinlock to synchronize,
thus change the hardware spinlock support to be optional to keep backward
compatibility.

Required properties:
- compatible: Should be "sprd,sc9860-adi".
- reg: Offset and length of ADI-SPI controller register space.
- hwlocks: Reference to a phandle of a hwlock provider node.
- hwlock-names: Reference to hwlock name strings defined in the same order
  as the hwlocks, should be "adi".
- #address-cells: Number of cells required to define a chip select address
  on the ADI-SPI bus. Should be set to 1.
- #size-cells: Size of cells required to define a chip select address size
  on the ADI-SPI bus. Should be set to 0.

Optional properties:
- hwlocks: Reference to a phandle of a hwlock provider node.
- hwlock-names: Reference to hwlock name strings defined in the same order
  as the hwlocks, should be "adi".
- sprd,hw-channels: This is an array of channel values up to 49 channels.
  The first value specifies the hardware channel id which is used to
  transfer data triggered by hardware automatically, and the second
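With the hwlock now optional, the driver side typically treats a missing "hwlocks" property as "no locking required" rather than as an error. A minimal sketch of that lookup (illustrative only, not the code from this commit; the function name is hypothetical):

#include <linux/hwspinlock.h>
#include <linux/of.h>

/* Sketch: return a hwspinlock if the DT describes one, else NULL. */
static int sprd_adi_optional_hwlock(struct device_node *np,
                                    struct hwspinlock **lock)
{
    int id = of_hwspin_lock_get_id(np, 0);

    if (id < 0) {
        *lock = NULL;   /* newer controllers: no hwlock needed */
        return 0;
    }

    *lock = hwspin_lock_request_specific(id);
    return *lock ? 0 : -ENXIO;
}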
@@ -103,7 +103,7 @@ static struct spi_board_info edb93xx_spi_board_info[] __initdata = {
};

static struct gpiod_lookup_table edb93xx_spi_cs_gpio_table = {
    .dev_id = "ep93xx-spi.0",
    .dev_id = "spi0",
    .table = {
        GPIO_LOOKUP("A", 6, "cs", GPIO_ACTIVE_LOW),
        { },
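The dev_id change above (and in the board files that follow) re-keys the chip-select lookup from the ep93xx platform-device name to the SPI controller's own device name, because the SPI core now requests the CS lines itself through gpiolib machine tables. A minimal sketch of how such a table is registered from board code (illustrative names, not from this commit):

#include <linux/gpio/machine.h>
#include <linux/init.h>

static struct gpiod_lookup_table example_spi_cs_gpio_table = {
    .dev_id = "spi0",   /* the SPI controller device */
    .table = {
        GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
        { },
    },
};

static void __init example_register_spi_cs(void)
{
    gpiod_add_lookup_table(&example_spi_cs_gpio_table);
}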
@@ -73,7 +73,7 @@ static struct spi_board_info simone_spi_devices[] __initdata = {
 * v1.3 parts will still work, since the signal on SFRMOUT is automatic.
 */
static struct gpiod_lookup_table simone_spi_cs_gpio_table = {
    .dev_id = "ep93xx-spi.0",
    .dev_id = "spi0",
    .table = {
        GPIO_LOOKUP("A", 1, "cs", GPIO_ACTIVE_LOW),
        { },
@@ -267,7 +267,7 @@ static struct spi_board_info bk3_spi_board_info[] __initdata = {
 * goes through CPLD
 */
static struct gpiod_lookup_table bk3_spi_cs_gpio_table = {
    .dev_id = "ep93xx-spi.0",
    .dev_id = "spi0",
    .table = {
        GPIO_LOOKUP("F", 3, "cs", GPIO_ACTIVE_LOW),
        { },
@@ -316,7 +316,7 @@ static struct spi_board_info ts72xx_spi_devices[] __initdata = {
};

static struct gpiod_lookup_table ts72xx_spi_cs_gpio_table = {
    .dev_id = "ep93xx-spi.0",
    .dev_id = "spi0",
    .table = {
        /* DIO_17 */
        GPIO_LOOKUP("F", 2, "cs", GPIO_ACTIVE_LOW),
@@ -242,7 +242,7 @@ static struct spi_board_info vision_spi_board_info[] __initdata = {
};

static struct gpiod_lookup_table vision_spi_cs_gpio_table = {
    .dev_id = "ep93xx-spi.0",
    .dev_id = "spi0",
    .table = {
        GPIO_LOOKUP_IDX("A", 6, "cs", 0, GPIO_ACTIVE_LOW),
        GPIO_LOOKUP_IDX("A", 7, "cs", 1, GPIO_ACTIVE_LOW),
@@ -37,10 +37,19 @@
#define BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED 14
#define BCM2835_DMA_CHAN_NAME_SIZE 8

/**
 * struct bcm2835_dmadev - BCM2835 DMA controller
 * @ddev: DMA device
 * @base: base address of register map
 * @dma_parms: DMA parameters (to convey 1 GByte max segment size to clients)
 * @zero_page: bus address of zero page (to detect transactions copying from
 *     zero page and avoid accessing memory if so)
 */
struct bcm2835_dmadev {
    struct dma_device ddev;
    void __iomem *base;
    struct device_dma_parameters dma_parms;
    dma_addr_t zero_page;
};

struct bcm2835_dma_cb {

@@ -687,11 +696,12 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
    size_t period_len, enum dma_transfer_direction direction,
    unsigned long flags)
{
    struct bcm2835_dmadev *od = to_bcm2835_dma_dev(chan->device);
    struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
    struct bcm2835_desc *d;
    dma_addr_t src, dst;
    u32 info = BCM2835_DMA_WAIT_RESP;
    u32 extra = BCM2835_DMA_INT_EN;
    u32 extra = 0;
    size_t max_len = bcm2835_dma_max_frame_length(c);
    size_t frames;

@@ -707,6 +717,11 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
        return NULL;
    }

    if (flags & DMA_PREP_INTERRUPT)
        extra |= BCM2835_DMA_INT_EN;
    else
        period_len = buf_len;

    /*
     * warn if buf_len is not a multiple of period_len - this may leed
     * to unexpected latencies for interrupts and thus audiable clicks

@@ -732,6 +747,10 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic(
        dst = c->cfg.dst_addr;
        src = buf_addr;
        info |= BCM2835_DMA_D_DREQ | BCM2835_DMA_S_INC;

        /* non-lite channels can write zeroes w/o accessing memory */
        if (buf_addr == od->zero_page && !c->is_lite_channel)
            info |= BCM2835_DMA_S_IGNORE;
    }

    /* calculate number of frames */

@@ -778,7 +797,10 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan)

    /* stop DMA activity */
    if (c->desc) {
        vchan_terminate_vdesc(&c->desc->vd);
        if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT)
            vchan_terminate_vdesc(&c->desc->vd);
        else
            vchan_vdesc_fini(&c->desc->vd);
        c->desc = NULL;
        bcm2835_dma_abort(c);
    }

@@ -831,6 +853,9 @@ static void bcm2835_dma_free(struct bcm2835_dmadev *od)
        list_del(&c->vc.chan.device_node);
        tasklet_kill(&c->vc.task);
    }

    dma_unmap_page_attrs(od->ddev.dev, od->zero_page, PAGE_SIZE,
                         DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
}

static const struct of_device_id bcm2835_dma_of_match[] = {

@@ -907,11 +932,20 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
    od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
                          BIT(DMA_MEM_TO_MEM);
    od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
    od->ddev.descriptor_reuse = true;
    od->ddev.dev = &pdev->dev;
    INIT_LIST_HEAD(&od->ddev.channels);

    platform_set_drvdata(pdev, od);

    od->zero_page = dma_map_page_attrs(od->ddev.dev, ZERO_PAGE(0), 0,
                                       PAGE_SIZE, DMA_TO_DEVICE,
                                       DMA_ATTR_SKIP_CPU_SYNC);
    if (dma_mapping_error(od->ddev.dev, od->zero_page)) {
        dev_err(&pdev->dev, "Failed to map zero page\n");
        return -ENOMEM;
    }

    /* Request DMA channel mask from device tree */
    if (of_property_read_u32(pdev->dev.of_node,
                             "brcm,dma-channel-mask",
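The prep_dma_cyclic() change above means a dmaengine client that does not pass DMA_PREP_INTERRUPT gets a descriptor which never raises a completion interrupt (the whole buffer is treated as one period). A minimal client-side sketch (hypothetical function, not from this commit):

#include <linux/dmaengine.h>

/* Sketch: cyclic descriptor with no per-period completion interrupt. */
static struct dma_async_tx_descriptor *
example_prep_silent_cyclic(struct dma_chan *chan, dma_addr_t buf,
                           size_t len, size_t period)
{
    return dmaengine_prep_dma_cyclic(chan, buf, len, period,
                                     DMA_MEM_TO_DEV, 0 /* no flags */);
}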
@@ -706,7 +706,7 @@ static int cros_ec_spi_devm_high_pri_alloc(struct device *dev,
                                           struct cros_ec_spi *ec_spi)
{
    struct sched_param sched_priority = {
        .sched_priority = MAX_RT_PRIO - 1,
        .sched_priority = MAX_RT_PRIO / 2,
    };
    int err;
@@ -433,6 +433,16 @@ config SPI_MT7621
    help
      This selects a driver for the MediaTek MT7621 SPI Controller.

config SPI_NPCM_FIU
    tristate "Nuvoton NPCM FLASH Interface Unit"
    depends on ARCH_NPCM || COMPILE_TEST
    depends on OF && HAS_IOMEM
    help
      This enables support for the Flash Interface Unit SPI controller
      in master mode.
      This driver does not support generic SPI. The implementation only
      supports spi-mem interface.

config SPI_NPCM_PSPI
    tristate "Nuvoton NPCM PSPI Controller"
    depends on ARCH_NPCM || COMPILE_TEST
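Because the new FIU driver is exposed only through the spi-mem interface, flash clients describe each access as a spi_mem_op rather than as raw SPI transfers. A minimal sketch of such an op (the opcode, address width and bus widths below are illustrative, not taken from this commit):

#include <linux/spi/spi-mem.h>

/* Sketch: single-line 0x03 read of `len` bytes starting at `addr`. */
static int example_spi_mem_read(struct spi_mem *mem, u32 addr,
                                void *buf, size_t len)
{
    struct spi_mem_op op =
        SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
                   SPI_MEM_OP_ADDR(3, addr, 1),
                   SPI_MEM_OP_NO_DUMMY,
                   SPI_MEM_OP_DATA_IN(len, buf, 1));

    return spi_mem_exec_op(mem, &op);
}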
@@ -63,6 +63,7 @@ obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
@@ -526,7 +526,6 @@ static int atmel_qspi_probe(struct platform_device *pdev)
    /* Request the IRQ */
    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        dev_err(&pdev->dev, "missing IRQ\n");
        err = irq;
        goto disable_qspick;
    }
@@ -170,7 +170,6 @@ static int altera_spi_probe(struct platform_device *pdev)
{
    struct altera_spi *hw;
    struct spi_master *master;
    struct resource *res;
    int err = -ENODEV;

    master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));

@@ -189,8 +188,7 @@ static int altera_spi_probe(struct platform_device *pdev)
    hw = spi_master_get_devdata(master);

    /* find and map our resources */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    hw->base = devm_ioremap_resource(&pdev->dev, res);
    hw->base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(hw->base)) {
        err = PTR_ERR(hw->base);
        goto exit;
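This hunk is the first of many identical conversions in this merge: the two-step platform_get_resource() plus devm_ioremap_resource() sequence is collapsed into one devm_platform_ioremap_resource() call. A minimal sketch of the resulting probe pattern (generic names, not a specific driver from this commit):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>

/* Sketch: map the first MEM resource of a platform device in one call. */
static int example_probe(struct platform_device *pdev)
{
    void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

    if (IS_ERR(base))
        return PTR_ERR(base);

    /* ... program the hardware through base ... */
    return 0;
}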
@@ -817,7 +817,6 @@ static int a3700_spi_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
    struct device_node *of_node = dev->of_node;
    struct resource *res;
    struct spi_master *master;
    struct a3700_spi *spi;
    u32 num_cs = 0;

@@ -855,8 +854,7 @@ static int a3700_spi_probe(struct platform_device *pdev)

    spi->master = master;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    spi->base = devm_ioremap_resource(dev, res);
    spi->base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(spi->base)) {
        ret = PTR_ERR(spi->base);
        goto error;

@@ -864,7 +862,6 @@ static int a3700_spi_probe(struct platform_device *pdev)

    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        dev_err(dev, "could not get irq: %d\n", irq);
        ret = -ENXIO;
        goto error;
    }
@@ -139,7 +139,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
    struct spi_master *master;
    struct ath79_spi *sp;
    struct ath79_spi_platform_data *pdata;
    struct resource *r;
    unsigned long rate;
    int ret;

@@ -169,8 +168,7 @@ static int ath79_spi_probe(struct platform_device *pdev)
    sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
    sp->bitbang.flags = SPI_CS_HIGH;

    r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    sp->base = devm_ioremap_resource(&pdev->dev, r);
    sp->base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(sp->base)) {
        ret = PTR_ERR(sp->base);
        goto err_put_master;
@@ -23,6 +23,7 @@
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
#include <trace/events/spi.h>

/* SPI register offsets */
#define SPI_CR 0x0000

@@ -1409,9 +1410,13 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
    msg->actual_length = 0;

    list_for_each_entry(xfer, &msg->transfers, transfer_list) {
        trace_spi_transfer_start(msg, xfer);

        ret = atmel_spi_one_transfer(master, msg, xfer);
        if (ret)
            goto msg_done;

        trace_spi_transfer_stop(msg, xfer);
    }

    if (as->use_pdc)
@@ -460,7 +460,6 @@ static int spi_engine_probe(struct platform_device *pdev)
    struct spi_engine *spi_engine;
    struct spi_master *master;
    unsigned int version;
    struct resource *res;
    int irq;
    int ret;

@@ -480,8 +479,7 @@ static int spi_engine_probe(struct platform_device *pdev)

    spin_lock_init(&spi_engine->lock);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    spi_engine->base = devm_ioremap_resource(&pdev->dev, res);
    spi_engine->base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(spi_engine->base)) {
        ret = PTR_ERR(spi_engine->base);
        goto err_put_master;
@@ -897,6 +897,7 @@ static int bcm_qspi_transfer_one(struct spi_master *master,

        read_from_hw(qspi, slots);
    }
    bcm_qspi_enable_bspi(qspi);

    return 0;
}
@@ -25,7 +25,9 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/gpio/machine.h> /* FIXME: using chip internals */
#include <linux/gpio/driver.h> /* FIXME: using chip internals */
#include <linux/of_irq.h>
#include <linux/spi/spi.h>

@@ -66,6 +68,7 @@
#define BCM2835_SPI_FIFO_SIZE 64
#define BCM2835_SPI_FIFO_SIZE_3_4 48
#define BCM2835_SPI_DMA_MIN_LENGTH 96
#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */
#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
                              | SPI_NO_CS | SPI_3WIRE)

@@ -92,7 +95,8 @@ MODULE_PARM_DESC(polling_limit_us,
 * @rx_prologue: bytes received without DMA if first RX sglist entry's
 *     length is not a multiple of 4 (to overcome hardware limitation)
 * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry
 * @dma_pending: whether a DMA transfer is in progress
 * @prepare_cs: precalculated CS register value for ->prepare_message()
 *     (uses slave-specific clock polarity and phase settings)
 * @debugfs_dir: the debugfs directory - neede to remove debugfs when
 *     unloading the module
 * @count_transfer_polling: count of how often polling mode is used

@@ -102,6 +106,19 @@ MODULE_PARM_DESC(polling_limit_us,
 *     These are counted as well in @count_transfer_polling and
 *     @count_transfer_irq
 * @count_transfer_dma: count how often dma mode is used
 * @chip_select: SPI slave currently selected
 *     (used by bcm2835_spi_dma_tx_done() to write @clear_rx_cs)
 * @tx_dma_active: whether a TX DMA descriptor is in progress
 * @rx_dma_active: whether a RX DMA descriptor is in progress
 *     (used by bcm2835_spi_dma_tx_done() to handle a race)
 * @fill_tx_desc: preallocated TX DMA descriptor used for RX-only transfers
 *     (cyclically copies from zero page to TX FIFO)
 * @fill_tx_addr: bus address of zero page
 * @clear_rx_desc: preallocated RX DMA descriptor used for TX-only transfers
 *     (cyclically clears RX FIFO by writing @clear_rx_cs to CS register)
 * @clear_rx_addr: bus address of @clear_rx_cs
 * @clear_rx_cs: precalculated CS register value to clear RX FIFO
 *     (uses slave-specific clock polarity and phase settings)
 */
struct bcm2835_spi {
    void __iomem *regs;

@@ -115,13 +132,22 @@ struct bcm2835_spi {
    int tx_prologue;
    int rx_prologue;
    unsigned int tx_spillover;
    unsigned int dma_pending;
    u32 prepare_cs[BCM2835_SPI_NUM_CS];

    struct dentry *debugfs_dir;
    u64 count_transfer_polling;
    u64 count_transfer_irq;
    u64 count_transfer_irq_after_polling;
    u64 count_transfer_dma;

    u8 chip_select;
    unsigned int tx_dma_active;
    unsigned int rx_dma_active;
    struct dma_async_tx_descriptor *fill_tx_desc;
    dma_addr_t fill_tx_addr;
    struct dma_async_tx_descriptor *clear_rx_desc[BCM2835_SPI_NUM_CS];
    dma_addr_t clear_rx_addr;
    u32 clear_rx_cs[BCM2835_SPI_NUM_CS] ____cacheline_aligned;
};

#if defined(CONFIG_DEBUG_FS)
@@ -455,14 +481,14 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
    bs->rx_prologue = 0;
    bs->tx_spillover = false;

    if (!sg_is_last(&tfr->tx_sg.sgl[0]))
    if (bs->tx_buf && !sg_is_last(&tfr->tx_sg.sgl[0]))
        bs->tx_prologue = sg_dma_len(&tfr->tx_sg.sgl[0]) & 3;

    if (!sg_is_last(&tfr->rx_sg.sgl[0])) {
    if (bs->rx_buf && !sg_is_last(&tfr->rx_sg.sgl[0])) {
        bs->rx_prologue = sg_dma_len(&tfr->rx_sg.sgl[0]) & 3;

        if (bs->rx_prologue > bs->tx_prologue) {
            if (sg_is_last(&tfr->tx_sg.sgl[0])) {
            if (!bs->tx_buf || sg_is_last(&tfr->tx_sg.sgl[0])) {
                bs->tx_prologue = bs->rx_prologue;
            } else {
                bs->tx_prologue += 4;

@@ -496,6 +522,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr,
        sg_dma_len(&tfr->rx_sg.sgl[0]) -= bs->rx_prologue;
    }

    if (!bs->tx_buf)
        return;

    /*
     * Write remaining TX prologue. Adjust first entry in TX sglist.
     * Also adjust second entry if prologue spills over to it.

@@ -541,6 +570,9 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
        sg_dma_len(&tfr->rx_sg.sgl[0]) += bs->rx_prologue;
    }

    if (!bs->tx_buf)
        goto out;

    if (likely(!bs->tx_spillover)) {
        sg_dma_address(&tfr->tx_sg.sgl[0]) -= bs->tx_prologue;
        sg_dma_len(&tfr->tx_sg.sgl[0]) += bs->tx_prologue;

@@ -549,32 +581,85 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs)
        sg_dma_address(&tfr->tx_sg.sgl[1]) -= 4;
        sg_dma_len(&tfr->tx_sg.sgl[1]) += 4;
    }
out:
    bs->tx_prologue = 0;
}

static void bcm2835_spi_dma_done(void *data)
/**
 * bcm2835_spi_dma_rx_done() - callback for DMA RX channel
 * @data: SPI master controller
 *
 * Used for bidirectional and RX-only transfers.
 */
static void bcm2835_spi_dma_rx_done(void *data)
{
    struct spi_controller *ctlr = data;
    struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

    /* reset fifo and HW */
    bcm2835_spi_reset_hw(ctlr);

    /* and terminate tx-dma as we do not have an irq for it
    /* terminate tx-dma as we do not have an irq for it
     * because when the rx dma will terminate and this callback
     * is called the tx-dma must have finished - can't get to this
     * situation otherwise...
     */
    if (cmpxchg(&bs->dma_pending, true, false)) {
        dmaengine_terminate_async(ctlr->dma_tx);
        bcm2835_spi_undo_prologue(bs);
    }
    dmaengine_terminate_async(ctlr->dma_tx);
    bs->tx_dma_active = false;
    bs->rx_dma_active = false;
    bcm2835_spi_undo_prologue(bs);

    /* reset fifo and HW */
    bcm2835_spi_reset_hw(ctlr);

    /* and mark as completed */;
    complete(&ctlr->xfer_completion);
}

/**
 * bcm2835_spi_dma_tx_done() - callback for DMA TX channel
 * @data: SPI master controller
 *
 * Used for TX-only transfers.
 */
static void bcm2835_spi_dma_tx_done(void *data)
{
    struct spi_controller *ctlr = data;
    struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

    /* busy-wait for TX FIFO to empty */
    while (!(bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE))
        bcm2835_wr(bs, BCM2835_SPI_CS,
                   bs->clear_rx_cs[bs->chip_select]);

    bs->tx_dma_active = false;
    smp_wmb();

    /*
     * In case of a very short transfer, RX DMA may not have been
     * issued yet.  The onus is then on bcm2835_spi_transfer_one_dma()
     * to terminate it immediately after issuing.
     */
    if (cmpxchg(&bs->rx_dma_active, true, false))
        dmaengine_terminate_async(ctlr->dma_rx);

    bcm2835_spi_undo_prologue(bs);
    bcm2835_spi_reset_hw(ctlr);
    complete(&ctlr->xfer_completion);
}

/**
 * bcm2835_spi_prepare_sg() - prepare and submit DMA descriptor for sglist
 * @ctlr: SPI master controller
 * @spi: SPI slave
 * @tfr: SPI transfer
 * @bs: BCM2835 SPI controller
 * @is_tx: whether to submit DMA descriptor for TX or RX sglist
 *
 * Prepare and submit a DMA descriptor for the TX or RX sglist of @tfr.
 * Return 0 on success or a negative error number.
 */
static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
                                  struct spi_device *spi,
                                  struct spi_transfer *tfr,
                                  struct bcm2835_spi *bs,
                                  bool is_tx)
{
    struct dma_chan *chan;

@@ -591,8 +676,7 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
        chan = ctlr->dma_tx;
        nents = tfr->tx_sg.nents;
        sgl = tfr->tx_sg.sgl;
        flags = 0 /* no tx interrupt */;

        flags = tfr->rx_buf ? 0 : DMA_PREP_INTERRUPT;
    } else {
        dir = DMA_DEV_TO_MEM;
        chan = ctlr->dma_rx;

@@ -605,10 +689,17 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
    if (!desc)
        return -EINVAL;

    /* set callback for rx */
    /*
     * Completion is signaled by the RX channel for bidirectional and
     * RX-only transfers; else by the TX channel for TX-only transfers.
     */
    if (!is_tx) {
        desc->callback = bcm2835_spi_dma_done;
        desc->callback = bcm2835_spi_dma_rx_done;
        desc->callback_param = ctlr;
    } else if (!tfr->rx_buf) {
        desc->callback = bcm2835_spi_dma_tx_done;
        desc->callback_param = ctlr;
        bs->chip_select = spi->chip_select;
    }

    /* submit it to DMA-engine */
@@ -617,12 +708,60 @@ static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr,
    return dma_submit_error(cookie);
}

/**
 * bcm2835_spi_transfer_one_dma() - perform SPI transfer using DMA engine
 * @ctlr: SPI master controller
 * @spi: SPI slave
 * @tfr: SPI transfer
 * @cs: CS register
 *
 * For *bidirectional* transfers (both tx_buf and rx_buf are non-%NULL), set up
 * the TX and RX DMA channel to copy between memory and FIFO register.
 *
 * For *TX-only* transfers (rx_buf is %NULL), copying the RX FIFO's contents to
 * memory is pointless.  However not reading the RX FIFO isn't an option either
 * because transmission is halted once it's full.  As a workaround, cyclically
 * clear the RX FIFO by setting the CLEAR_RX bit in the CS register.
 *
 * The CS register value is precalculated in bcm2835_spi_setup().  Normally
 * this is called only once, on slave registration.  A DMA descriptor to write
 * this value is preallocated in bcm2835_dma_init().  All that's left to do
 * when performing a TX-only transfer is to submit this descriptor to the RX
 * DMA channel.  Latency is thereby minimized.  The descriptor does not
 * generate any interrupts while running.  It must be terminated once the
 * TX DMA channel is done.
 *
 * Clearing the RX FIFO is paced by the DREQ signal.  The signal is asserted
 * when the RX FIFO becomes half full, i.e. 32 bytes.  (Tuneable with the DC
 * register.)  Reading 32 bytes from the RX FIFO would normally require 8 bus
 * accesses, whereas clearing it requires only 1 bus access.  So an 8-fold
 * reduction in bus traffic and thus energy consumption is achieved.
 *
 * For *RX-only* transfers (tx_buf is %NULL), fill the TX FIFO by cyclically
 * copying from the zero page.  The DMA descriptor to do this is preallocated
 * in bcm2835_dma_init().  It must be terminated once the RX DMA channel is
 * done and can then be reused.
 *
 * The BCM2835 DMA driver autodetects when a transaction copies from the zero
 * page and utilizes the DMA controller's ability to synthesize zeroes instead
 * of copying them from memory.  This reduces traffic on the memory bus.  The
 * feature is not available on so-called "lite" channels, but normally TX DMA
 * is backed by a full-featured channel.
 *
 * Zero-filling the TX FIFO is paced by the DREQ signal.  Unfortunately the
 * BCM2835 SPI controller continues to assert DREQ even after the DLEN register
 * has been counted down to zero (hardware erratum).  Thus, when the transfer
 * has finished, the DMA engine zero-fills the TX FIFO until it is half full.
 * (Tuneable with the DC register.)  So up to 9 gratuitous bus accesses are
 * performed at the end of an RX-only transfer.
 */
static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
                                        struct spi_device *spi,
                                        struct spi_transfer *tfr,
                                        u32 cs)
{
    struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
    dma_cookie_t cookie;
    int ret;

    /* update usage statistics */

@@ -635,16 +774,15 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
    bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs);

    /* setup tx-DMA */
    ret = bcm2835_spi_prepare_sg(ctlr, tfr, true);
    if (bs->tx_buf) {
        ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, true);
    } else {
        cookie = dmaengine_submit(bs->fill_tx_desc);
        ret = dma_submit_error(cookie);
    }
    if (ret)
        goto err_reset_hw;

    /* start TX early */
    dma_async_issue_pending(ctlr->dma_tx);

    /* mark as dma pending */
    bs->dma_pending = 1;

    /* set the DMA length */
    bcm2835_wr(bs, BCM2835_SPI_DLEN, bs->tx_len);

@@ -652,20 +790,43 @@ static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr,
    bcm2835_wr(bs, BCM2835_SPI_CS,
               cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);

    bs->tx_dma_active = true;
    smp_wmb();

    /* start TX early */
    dma_async_issue_pending(ctlr->dma_tx);

    /* setup rx-DMA late - to run transfers while
     * mapping of the rx buffers still takes place
     * this saves 10us or more.
     */
    ret = bcm2835_spi_prepare_sg(ctlr, tfr, false);
    if (bs->rx_buf) {
        ret = bcm2835_spi_prepare_sg(ctlr, spi, tfr, bs, false);
    } else {
        cookie = dmaengine_submit(bs->clear_rx_desc[spi->chip_select]);
        ret = dma_submit_error(cookie);
    }
    if (ret) {
        /* need to reset on errors */
        dmaengine_terminate_sync(ctlr->dma_tx);
        bs->dma_pending = false;
        bs->tx_dma_active = false;
        goto err_reset_hw;
    }

    /* start rx dma late */
    dma_async_issue_pending(ctlr->dma_rx);
    bs->rx_dma_active = true;
    smp_mb();

    /*
     * In case of a very short TX-only transfer, bcm2835_spi_dma_tx_done()
     * may run before RX DMA is issued.  Terminate RX DMA if so.
     */
    if (!bs->rx_buf && !bs->tx_dma_active &&
        cmpxchg(&bs->rx_dma_active, true, false)) {
        dmaengine_terminate_async(ctlr->dma_rx);
        bcm2835_spi_reset_hw(ctlr);
    }

    /* wait for wakeup in framework */
    return 1;

@@ -688,26 +849,52 @@ static bool bcm2835_spi_can_dma(struct spi_controller *ctlr,
    return true;
}

static void bcm2835_dma_release(struct spi_controller *ctlr)
static void bcm2835_dma_release(struct spi_controller *ctlr,
                                struct bcm2835_spi *bs)
{
    int i;

    if (ctlr->dma_tx) {
        dmaengine_terminate_sync(ctlr->dma_tx);

        if (bs->fill_tx_desc)
            dmaengine_desc_free(bs->fill_tx_desc);

        if (bs->fill_tx_addr)
            dma_unmap_page_attrs(ctlr->dma_tx->device->dev,
                                 bs->fill_tx_addr, sizeof(u32),
                                 DMA_TO_DEVICE,
                                 DMA_ATTR_SKIP_CPU_SYNC);

        dma_release_channel(ctlr->dma_tx);
        ctlr->dma_tx = NULL;
    }

    if (ctlr->dma_rx) {
        dmaengine_terminate_sync(ctlr->dma_rx);

        for (i = 0; i < BCM2835_SPI_NUM_CS; i++)
            if (bs->clear_rx_desc[i])
                dmaengine_desc_free(bs->clear_rx_desc[i]);

        if (bs->clear_rx_addr)
            dma_unmap_single(ctlr->dma_rx->device->dev,
                             bs->clear_rx_addr,
                             sizeof(bs->clear_rx_cs),
                             DMA_TO_DEVICE);

        dma_release_channel(ctlr->dma_rx);
        ctlr->dma_rx = NULL;
    }
}

static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev,
                             struct bcm2835_spi *bs)
{
    struct dma_slave_config slave_config;
    const __be32 *addr;
    dma_addr_t dma_reg_base;
    int ret;
    int ret, i;

    /* base address in dma-space */
    addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL);
@@ -729,7 +916,11 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
        goto err_release;
    }

    /* configure DMAs */
    /*
     * The TX DMA channel either copies a transfer's TX buffer to the FIFO
     * or, in case of an RX-only transfer, cyclically copies from the zero
     * page to the FIFO using a preallocated, reusable descriptor.
     */
    slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
    slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

@@ -737,17 +928,74 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
    if (ret)
        goto err_config;

    bs->fill_tx_addr = dma_map_page_attrs(ctlr->dma_tx->device->dev,
                                          ZERO_PAGE(0), 0, sizeof(u32),
                                          DMA_TO_DEVICE,
                                          DMA_ATTR_SKIP_CPU_SYNC);
    if (dma_mapping_error(ctlr->dma_tx->device->dev, bs->fill_tx_addr)) {
        dev_err(dev, "cannot map zero page - not using DMA mode\n");
        bs->fill_tx_addr = 0;
        goto err_release;
    }

    bs->fill_tx_desc = dmaengine_prep_dma_cyclic(ctlr->dma_tx,
                                                 bs->fill_tx_addr,
                                                 sizeof(u32), 0,
                                                 DMA_MEM_TO_DEV, 0);
    if (!bs->fill_tx_desc) {
        dev_err(dev, "cannot prepare fill_tx_desc - not using DMA mode\n");
        goto err_release;
    }

    ret = dmaengine_desc_set_reuse(bs->fill_tx_desc);
    if (ret) {
        dev_err(dev, "cannot reuse fill_tx_desc - not using DMA mode\n");
        goto err_release;
    }

    /*
     * The RX DMA channel is used bidirectionally:  It either reads the
     * RX FIFO or, in case of a TX-only transfer, cyclically writes a
     * precalculated value to the CS register to clear the RX FIFO.
     */
    slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO);
    slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
    slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_CS);
    slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;

    ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config);
    if (ret)
        goto err_config;

    bs->clear_rx_addr = dma_map_single(ctlr->dma_rx->device->dev,
                                       bs->clear_rx_cs,
                                       sizeof(bs->clear_rx_cs),
                                       DMA_TO_DEVICE);
    if (dma_mapping_error(ctlr->dma_rx->device->dev, bs->clear_rx_addr)) {
        dev_err(dev, "cannot map clear_rx_cs - not using DMA mode\n");
        bs->clear_rx_addr = 0;
        goto err_release;
    }

    for (i = 0; i < BCM2835_SPI_NUM_CS; i++) {
        bs->clear_rx_desc[i] = dmaengine_prep_dma_cyclic(ctlr->dma_rx,
                                bs->clear_rx_addr + i * sizeof(u32),
                                sizeof(u32), 0,
                                DMA_MEM_TO_DEV, 0);
        if (!bs->clear_rx_desc[i]) {
            dev_err(dev, "cannot prepare clear_rx_desc - not using DMA mode\n");
            goto err_release;
        }

        ret = dmaengine_desc_set_reuse(bs->clear_rx_desc[i]);
        if (ret) {
            dev_err(dev, "cannot reuse clear_rx_desc - not using DMA mode\n");
            goto err_release;
        }
    }

    /* all went well, so set can_dma */
    ctlr->can_dma = bcm2835_spi_can_dma;
    /* need to do TX AND RX DMA, so we need dummy buffers */
    ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;

    return;

@@ -755,7 +1003,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev)
    dev_err(dev, "issue configuring dma: %d - not using DMA mode\n",
            ret);
err_release:
    bcm2835_dma_release(ctlr);
    bcm2835_dma_release(ctlr, bs);
err:
    return;
}

@@ -822,7 +1070,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
    struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
    unsigned long spi_hz, clk_hz, cdiv, spi_used_hz;
    unsigned long hz_per_byte, byte_limit;
    u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
    u32 cs = bs->prepare_cs[spi->chip_select];

    /* set clock */
    spi_hz = tfr->speed_hz;

@@ -844,18 +1092,8 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
    bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);

    /* handle all the 3-wire mode */
    if (spi->mode & SPI_3WIRE && tfr->rx_buf &&
        tfr->rx_buf != ctlr->dummy_rx)
    if (spi->mode & SPI_3WIRE && tfr->rx_buf)
        cs |= BCM2835_SPI_CS_REN;
    else
        cs &= ~BCM2835_SPI_CS_REN;

    /*
     * The driver always uses software-controlled GPIO Chip Select.
     * Set the hardware-controlled native Chip Select to an invalid
     * value to prevent it from interfering.
     */
    cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;

    /* set transmit buffers and length */
    bs->tx_buf = tfr->tx_buf;

@@ -892,7 +1130,6 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
{
    struct spi_device *spi = msg->spi;
    struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
    u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
    int ret;

    if (ctlr->can_dma) {

@@ -907,14 +1144,11 @@ static int bcm2835_spi_prepare_message(struct spi_controller *ctlr,
            return ret;
    }

    cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);

    if (spi->mode & SPI_CPOL)
        cs |= BCM2835_SPI_CS_CPOL;
    if (spi->mode & SPI_CPHA)
        cs |= BCM2835_SPI_CS_CPHA;

    bcm2835_wr(bs, BCM2835_SPI_CS, cs);
    /*
     * Set up clock polarity before spi_transfer_one_message() asserts
     * chip select to avoid a gratuitous clock signal edge.
     */
    bcm2835_wr(bs, BCM2835_SPI_CS, bs->prepare_cs[spi->chip_select]);

    return 0;
}

@@ -925,11 +1159,12 @@ static void bcm2835_spi_handle_err(struct spi_controller *ctlr,
    struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);

    /* if an error occurred and we have an active dma, then terminate */
    if (cmpxchg(&bs->dma_pending, true, false)) {
        dmaengine_terminate_sync(ctlr->dma_tx);
        dmaengine_terminate_sync(ctlr->dma_rx);
        bcm2835_spi_undo_prologue(bs);
    }
    dmaengine_terminate_sync(ctlr->dma_tx);
    bs->tx_dma_active = false;
    dmaengine_terminate_sync(ctlr->dma_rx);
    bs->rx_dma_active = false;
    bcm2835_spi_undo_prologue(bs);

    /* and reset */
    bcm2835_spi_reset_hw(ctlr);
}
@@ -941,14 +1176,50 @@ static int chip_match_name(struct gpio_chip *chip, void *data)

static int bcm2835_spi_setup(struct spi_device *spi)
{
    int err;
    struct spi_controller *ctlr = spi->controller;
    struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
    struct gpio_chip *chip;
    enum gpio_lookup_flags lflags;
    u32 cs;

    /*
     * Precalculate SPI slave's CS register value for ->prepare_message():
     * The driver always uses software-controlled GPIO chip select, hence
     * set the hardware-controlled native chip select to an invalid value
     * to prevent it from interfering.
     */
    cs = BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
    if (spi->mode & SPI_CPOL)
        cs |= BCM2835_SPI_CS_CPOL;
    if (spi->mode & SPI_CPHA)
        cs |= BCM2835_SPI_CS_CPHA;
    bs->prepare_cs[spi->chip_select] = cs;

    /*
     * Precalculate SPI slave's CS register value to clear RX FIFO
     * in case of a TX-only DMA transfer.
     */
    if (ctlr->dma_rx) {
        bs->clear_rx_cs[spi->chip_select] = cs |
                                            BCM2835_SPI_CS_TA |
                                            BCM2835_SPI_CS_DMAEN |
                                            BCM2835_SPI_CS_CLEAR_RX;
        dma_sync_single_for_device(ctlr->dma_rx->device->dev,
                                   bs->clear_rx_addr,
                                   sizeof(bs->clear_rx_cs),
                                   DMA_TO_DEVICE);
    }

    /*
     * sanity checking the native-chipselects
     */
    if (spi->mode & SPI_NO_CS)
        return 0;
    if (gpio_is_valid(spi->cs_gpio))
    /*
     * The SPI core has successfully requested the CS GPIO line from the
     * device tree, so we are done.
     */
    if (spi->cs_gpiod)
        return 0;
    if (spi->chip_select > 1) {
        /* error in the case of native CS requested with CS > 1

@@ -959,29 +1230,43 @@ static int bcm2835_spi_setup(struct spi_device *spi)
            "setup: only two native chip-selects are supported\n");
        return -EINVAL;
    }
    /* now translate native cs to GPIO */

    /*
     * Translate native CS to GPIO
     *
     * FIXME: poking around in the gpiolib internals like this is
     * not very good practice. Find a way to locate the real problem
     * and fix it. Why is the GPIO descriptor in spi->cs_gpiod
     * sometimes not assigned correctly? Erroneous device trees?
     */

    /* get the gpio chip for the base */
    chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
    if (!chip)
        return 0;

    /* and calculate the real CS */
    spi->cs_gpio = chip->base + 8 - spi->chip_select;
    /*
     * Retrieve the corresponding GPIO line used for CS.
     * The inversion semantics will be handled by the GPIO core
     * code, so we pass GPIOS_OUT_LOW for "unasserted" and
     * the correct flag for inversion semantics. The SPI_CS_HIGH
     * on spi->mode cannot be checked for polarity in this case
     * as the flag use_gpio_descriptors enforces SPI_CS_HIGH.
     */
    if (of_property_read_bool(spi->dev.of_node, "spi-cs-high"))
        lflags = GPIO_ACTIVE_HIGH;
    else
        lflags = GPIO_ACTIVE_LOW;
    spi->cs_gpiod = gpiochip_request_own_desc(chip, 8 - spi->chip_select,
                                              DRV_NAME,
                                              lflags,
                                              GPIOD_OUT_LOW);
    if (IS_ERR(spi->cs_gpiod))
        return PTR_ERR(spi->cs_gpiod);

    /* and set up the "mode" and level */
    dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n",
             spi->chip_select, spi->cs_gpio);

    /* set up GPIO as output and pull to the correct level */
    err = gpio_direction_output(spi->cs_gpio,
                                (spi->mode & SPI_CS_HIGH) ? 0 : 1);
    if (err) {
        dev_err(&spi->dev,
                "could not set CS%i gpio %i as output: %i",
                spi->chip_select, spi->cs_gpio, err);
        return err;
    }
    dev_info(&spi->dev, "setting up native-CS%i to use GPIO\n",
             spi->chip_select);

    return 0;
}

@@ -990,18 +1275,19 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
{
    struct spi_controller *ctlr;
    struct bcm2835_spi *bs;
    struct resource *res;
    int err;

    ctlr = spi_alloc_master(&pdev->dev, sizeof(*bs));
    ctlr = spi_alloc_master(&pdev->dev, ALIGN(sizeof(*bs),
                                              dma_get_cache_alignment()));
    if (!ctlr)
        return -ENOMEM;

    platform_set_drvdata(pdev, ctlr);

    ctlr->use_gpio_descriptors = true;
    ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
    ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
    ctlr->num_chipselect = 3;
    ctlr->num_chipselect = BCM2835_SPI_NUM_CS;
    ctlr->setup = bcm2835_spi_setup;
    ctlr->transfer_one = bcm2835_spi_transfer_one;
    ctlr->handle_err = bcm2835_spi_handle_err;

@@ -1010,8 +1296,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)

    bs = spi_controller_get_devdata(ctlr);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    bs->regs = devm_ioremap_resource(&pdev->dev, res);
    bs->regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(bs->regs)) {
        err = PTR_ERR(bs->regs);
        goto out_controller_put;

@@ -1026,14 +1311,13 @@ static int bcm2835_spi_probe(struct platform_device *pdev)

    bs->irq = platform_get_irq(pdev, 0);
    if (bs->irq <= 0) {
        dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
        err = bs->irq ? bs->irq : -ENODEV;
        goto out_controller_put;
    }

    clk_prepare_enable(bs->clk);

    bcm2835_dma_init(ctlr, &pdev->dev);
    bcm2835_dma_init(ctlr, &pdev->dev, bs);

    /* initialise the hardware with the default polarities */
    bcm2835_wr(bs, BCM2835_SPI_CS,

@@ -1077,7 +1361,7 @@ static int bcm2835_spi_remove(struct platform_device *pdev)

    clk_disable_unprepare(bs->clk);

    bcm2835_dma_release(ctlr);
    bcm2835_dma_release(ctlr, bs);

    return 0;
}
@@ -491,7 +491,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
{
    struct spi_master *master;
    struct bcm2835aux_spi *bs;
    struct resource *res;
    unsigned long clk_hz;
    int err;

@@ -524,8 +523,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
    bs = spi_master_get_devdata(master);

    /* the main area */
    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    bs->regs = devm_ioremap_resource(&pdev->dev, res);
    bs->regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(bs->regs)) {
        err = PTR_ERR(bs->regs);
        goto out_master_put;

@@ -540,7 +538,6 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)

    bs->irq = platform_get_irq(pdev, 0);
    if (bs->irq <= 0) {
        dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
        err = bs->irq ? bs->irq : -ENODEV;
        goto out_master_put;
    }
@@ -330,7 +330,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
{
    struct spi_master *master;
    struct bcm63xx_hsspi *bs;
    struct resource *res_mem;
    void __iomem *regs;
    struct device *dev = &pdev->dev;
    struct clk *clk, *pll_clk = NULL;

@@ -338,13 +337,10 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
    u32 reg, rate, num_cs = HSSPI_SPI_MAX_CS;

    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        dev_err(dev, "no irq: %d\n", irq);
    if (irq < 0)
        return irq;
    }

    res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    regs = devm_ioremap_resource(dev, res_mem);
    regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(regs))
        return PTR_ERR(regs);
@@ -520,10 +520,8 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
    }

    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        dev_err(dev, "no irq: %d\n", irq);
    if (irq < 0)
        return irq;
    }

    clk = devm_clk_get(dev, "spi");
    if (IS_ERR(clk)) {
@@ -474,7 +474,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
    int ret = 0, irq;
    struct spi_master *master;
    struct cdns_spi *xspi;
    struct resource *res;
    u32 num_cs;

    master = spi_alloc_master(&pdev->dev, sizeof(*xspi));

@@ -485,8 +484,7 @@ static int cdns_spi_probe(struct platform_device *pdev)
    master->dev.of_node = pdev->dev.of_node;
    platform_set_drvdata(pdev, master);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    xspi->regs = devm_ioremap_resource(&pdev->dev, res);
    xspi->regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(xspi->regs)) {
        ret = PTR_ERR(xspi->regs);
        goto remove_master;

@@ -540,7 +538,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
    irq = platform_get_irq(pdev, 0);
    if (irq <= 0) {
        ret = -ENXIO;
        dev_err(&pdev->dev, "irq number is invalid\n");
        goto clk_dis_all;
    }
@@ -18,7 +18,6 @@

static int octeon_spi_probe(struct platform_device *pdev)
{
    struct resource *res_mem;
    void __iomem *reg_base;
    struct spi_master *master;
    struct octeon_spi *p;

@@ -30,8 +29,7 @@ static int octeon_spi_probe(struct platform_device *pdev)
    p = spi_master_get_devdata(master);
    platform_set_drvdata(pdev, master);

    res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    reg_base = devm_ioremap_resource(&pdev->dev, res_mem);
    reg_base = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(reg_base)) {
        err = PTR_ERR(reg_base);
        goto fail;
@@ -91,7 +91,6 @@ static int spi_clps711x_probe(struct platform_device *pdev)
{
    struct spi_clps711x_data *hw;
    struct spi_master *master;
    struct resource *res;
    int irq, ret;

    irq = platform_get_irq(pdev, 0);

@@ -125,8 +124,7 @@ static int spi_clps711x_probe(struct platform_device *pdev)
        goto err_out;
    }

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    hw->syncio = devm_ioremap_resource(&pdev->dev, res);
    hw->syncio = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(hw->syncio)) {
        ret = PTR_ERR(hw->syncio);
        goto err_out;
@@ -339,7 +339,6 @@ static int mcfqspi_probe(struct platform_device *pdev)
{
    struct spi_master *master;
    struct mcfqspi *mcfqspi;
    struct resource *res;
    struct mcfqspi_platform_data *pdata;
    int status;

@@ -362,8 +361,7 @@ static int mcfqspi_probe(struct platform_device *pdev)

    mcfqspi = spi_master_get_devdata(master);

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    mcfqspi->iobase = devm_ioremap_resource(&pdev->dev, res);
    mcfqspi->iobase = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(mcfqspi->iobase)) {
        status = PTR_ERR(mcfqspi->iobase);
        goto fail0;
@@ -79,14 +79,12 @@ static int dw_spi_mscc_init(struct platform_device *pdev,
                            const char *cpu_syscon, u32 if_si_owner_offset)
{
    struct dw_spi_mscc *dwsmscc;
    struct resource *res;

    dwsmscc = devm_kzalloc(&pdev->dev, sizeof(*dwsmscc), GFP_KERNEL);
    if (!dwsmscc)
        return -ENOMEM;

    res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
    dwsmscc->spi_mst = devm_ioremap_resource(&pdev->dev, res);
    dwsmscc->spi_mst = devm_platform_ioremap_resource(pdev, 1);
    if (IS_ERR(dwsmscc->spi_mst)) {
        dev_err(&pdev->dev, "SPI_MST region map failed\n");
        return PTR_ERR(dwsmscc->spi_mst);

@@ -138,7 +136,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
                      struct dw_spi_mmio *dwsmmio);
    struct dw_spi_mmio *dwsmmio;
    struct dw_spi *dws;
    struct resource *mem;
    int ret;
    int num_cs;

@@ -150,18 +147,15 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
    dws = &dwsmmio->dws;

    /* Get basic io resource and map it */
    mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    dws->regs = devm_ioremap_resource(&pdev->dev, mem);
    dws->regs = devm_platform_ioremap_resource(pdev, 0);
    if (IS_ERR(dws->regs)) {
        dev_err(&pdev->dev, "SPI region map failed\n");
        return PTR_ERR(dws->regs);
    }

    dws->irq = platform_get_irq(pdev, 0);
    if (dws->irq < 0) {
        dev_err(&pdev->dev, "no irq resource?\n");
    if (dws->irq < 0)
        return dws->irq; /* -ENXIO */
    }

    dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
    if (IS_ERR(dwsmmio->clk))

@@ -172,8 +166,10 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)

    /* Optional clock needed to access the registers */
    dwsmmio->pclk = devm_clk_get_optional(&pdev->dev, "pclk");
    if (IS_ERR(dwsmmio->pclk))
        return PTR_ERR(dwsmmio->pclk);
    if (IS_ERR(dwsmmio->pclk)) {
        ret = PTR_ERR(dwsmmio->pclk);
        goto out_clk;
    }
    ret = clk_prepare_enable(dwsmmio->pclk);
    if (ret)
        goto out_clk;
@@ -106,16 +106,14 @@ static void spi_pci_remove(struct pci_dev *pdev)
#ifdef CONFIG_PM_SLEEP
static int spi_suspend(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct dw_spi *dws = pci_get_drvdata(pdev);
    struct dw_spi *dws = dev_get_drvdata(dev);

    return dw_spi_suspend_host(dws);
}

static int spi_resume(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    struct dw_spi *dws = pci_get_drvdata(pdev);
    struct dw_spi *dws = dev_get_drvdata(dev);

    return dw_spi_resume_host(dws);
}
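The suspend/resume change works because pci_set_drvdata() stores its pointer in the embedded struct device, so bus-neutral PM callbacks can fetch it with dev_get_drvdata() and skip the to_pci_dev() detour. A minimal sketch of the pattern (hypothetical driver data, not from this commit):

#include <linux/device.h>
#include <linux/pm.h>

struct example_priv {
    bool running;
};

/* Sketch: PM callback that does not care which bus the device sits on. */
static int example_suspend(struct device *dev)
{
    struct example_priv *priv = dev_get_drvdata(dev);

    priv->running = false;
    return 0;
}

static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, NULL);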
@@ -400,10 +400,8 @@ static int efm32_spi_probe(struct platform_device *pdev)
    }

    ret = platform_get_irq(pdev, 0);
    if (ret <= 0) {
        dev_err(&pdev->dev, "failed to get rx irq (%d)\n", ret);
    if (ret <= 0)
        goto err;
    }

    ddata->rxirq = ret;
@@ -656,10 +656,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
    }

    irq = platform_get_irq(pdev, 0);
    if (irq < 0) {
        dev_err(&pdev->dev, "failed to get irq resources\n");
    if (irq < 0)
        return -EBUSY;
    }

    res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
    if (!res) {
@@ -305,12 +305,10 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
    }

    if (mspi->flags & SPI_CPM1) {
        struct resource *res;
        void *pram;

        res = platform_get_resource(to_platform_device(dev),
                                    IORESOURCE_MEM, 1);
        pram = devm_ioremap_resource(dev, res);
        pram = devm_platform_ioremap_resource(to_platform_device(dev),
                                              1);
        if (IS_ERR(pram))
            mspi->pram = NULL;
        else
[diff of one file suppressed because it is too large]
|
@ -91,9 +91,6 @@ static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
|
|||
|
||||
struct mpc8xxx_spi_probe_info {
|
||||
struct fsl_spi_platform_data pdata;
|
||||
int ngpios;
|
||||
int *gpios;
|
||||
bool *alow_flags;
|
||||
__be32 __iomem *immr_spi_cs;
|
||||
};
|
||||
|
||||
|
|
|
@@ -860,10 +860,8 @@ static int fsl_qspi_probe(struct platform_device *pdev)

    /* find the irq */
    ret = platform_get_irq(pdev, 0);
    if (ret < 0) {
        dev_err(dev, "failed to get the irq: %d\n", ret);
    if (ret < 0)
        goto err_disable_clk;
    }

    ret = devm_request_irq(dev, ret,
                           fsl_qspi_irq_handler, 0, pdev->name, q);
@ -18,7 +18,7 @@
|
|||
#include <linux/delay.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/fsl_devices.h>
|
||||
#include <linux/gpio.h>
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/interrupt.h>
|
||||
#include <linux/irq.h>
|
||||
#include <linux/kernel.h>
|
||||
|
@ -28,7 +28,6 @@
|
|||
#include <linux/of.h>
|
||||
#include <linux/of_address.h>
|
||||
#include <linux/of_irq.h>
|
||||
#include <linux/of_gpio.h>
|
||||
#include <linux/of_platform.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/spi/spi.h>
|
||||
|
@ -481,32 +480,6 @@ static int fsl_spi_setup(struct spi_device *spi)
|
|||
return retval;
|
||||
}
|
||||
|
||||
if (mpc8xxx_spi->type == TYPE_GRLIB) {
|
||||
if (gpio_is_valid(spi->cs_gpio)) {
|
||||
int desel;
|
||||
|
||||
retval = gpio_request(spi->cs_gpio,
|
||||
dev_name(&spi->dev));
|
||||
if (retval)
|
||||
return retval;
|
||||
|
||||
desel = !(spi->mode & SPI_CS_HIGH);
|
||||
retval = gpio_direction_output(spi->cs_gpio, desel);
|
||||
if (retval) {
|
||||
gpio_free(spi->cs_gpio);
|
||||
return retval;
|
||||
}
|
||||
} else if (spi->cs_gpio != -ENOENT) {
|
||||
if (spi->cs_gpio < 0)
|
||||
return spi->cs_gpio;
|
||||
return -EINVAL;
|
||||
}
|
||||
/* When spi->cs_gpio == -ENOENT, a hole in the phandle list
|
||||
* indicates to use native chipselect if present, or allow for
|
||||
* an always selected chip
|
||||
*/
|
||||
}
|
||||
|
||||
/* Initialize chipselect - might be active for SPI_CS_HIGH mode */
|
||||
fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
|
||||
|
||||
|
@ -515,12 +488,8 @@ static int fsl_spi_setup(struct spi_device *spi)
|
|||
|
||||
static void fsl_spi_cleanup(struct spi_device *spi)
|
||||
{
|
||||
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
|
||||
struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
|
||||
|
||||
if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio))
|
||||
gpio_free(spi->cs_gpio);
|
||||
|
||||
kfree(cs);
|
||||
spi_set_ctldata(spi, NULL);
|
||||
}
|
||||
|
@@ -586,8 +555,8 @@ static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
|
|||
u32 slvsel;
|
||||
u16 cs = spi->chip_select;
|
||||
|
||||
if (gpio_is_valid(spi->cs_gpio)) {
|
||||
gpio_set_value(spi->cs_gpio, on);
|
||||
if (spi->cs_gpiod) {
|
||||
gpiod_set_value(spi->cs_gpiod, on);
|
||||
} else if (cs < mpc8xxx_spi->native_chipselects) {
|
||||
slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
|
||||
slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
|
||||
|
@@ -718,139 +687,19 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
|
|||
|
||||
static void fsl_spi_cs_control(struct spi_device *spi, bool on)
|
||||
{
|
||||
struct device *dev = spi->dev.parent->parent;
|
||||
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
|
||||
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
|
||||
u16 cs = spi->chip_select;
|
||||
|
||||
if (cs < pinfo->ngpios) {
|
||||
int gpio = pinfo->gpios[cs];
|
||||
bool alow = pinfo->alow_flags[cs];
|
||||
|
||||
gpio_set_value(gpio, on ^ alow);
|
||||
if (spi->cs_gpiod) {
|
||||
gpiod_set_value(spi->cs_gpiod, on);
|
||||
} else {
|
||||
if (WARN_ON_ONCE(cs > pinfo->ngpios || !pinfo->immr_spi_cs))
|
||||
struct device *dev = spi->dev.parent->parent;
|
||||
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
|
||||
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
|
||||
|
||||
if (WARN_ON_ONCE(!pinfo->immr_spi_cs))
|
||||
return;
|
||||
iowrite32be(on ? SPI_BOOT_SEL_BIT : 0, pinfo->immr_spi_cs);
|
||||
}
|
||||
}
|
||||
|
||||
static int of_fsl_spi_get_chipselects(struct device *dev)
|
||||
{
|
||||
struct device_node *np = dev->of_node;
|
||||
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
|
||||
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
|
||||
bool spisel_boot = IS_ENABLED(CONFIG_FSL_SOC) &&
|
||||
of_property_read_bool(np, "fsl,spisel_boot");
|
||||
int ngpios;
|
||||
int i = 0;
|
||||
int ret;
|
||||
|
||||
ngpios = of_gpio_count(np);
|
||||
ngpios = max(ngpios, 0);
|
||||
if (ngpios == 0 && !spisel_boot) {
|
||||
/*
|
||||
* SPI w/o chip-select line. One SPI device is still permitted
|
||||
* though.
|
||||
*/
|
||||
pdata->max_chipselect = 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
pinfo->ngpios = ngpios;
|
||||
pinfo->gpios = kmalloc_array(ngpios, sizeof(*pinfo->gpios),
|
||||
GFP_KERNEL);
|
||||
if (!pinfo->gpios)
|
||||
return -ENOMEM;
|
||||
memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
|
||||
|
||||
pinfo->alow_flags = kcalloc(ngpios, sizeof(*pinfo->alow_flags),
|
||||
GFP_KERNEL);
|
||||
if (!pinfo->alow_flags) {
|
||||
ret = -ENOMEM;
|
||||
goto err_alloc_flags;
|
||||
}
|
||||
|
||||
for (; i < ngpios; i++) {
|
||||
int gpio;
|
||||
enum of_gpio_flags flags;
|
||||
|
||||
gpio = of_get_gpio_flags(np, i, &flags);
|
||||
if (!gpio_is_valid(gpio)) {
|
||||
dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
|
||||
ret = gpio;
|
||||
goto err_loop;
|
||||
}
|
||||
|
||||
ret = gpio_request(gpio, dev_name(dev));
|
||||
if (ret) {
|
||||
dev_err(dev, "can't request gpio #%d: %d\n", i, ret);
|
||||
goto err_loop;
|
||||
}
|
||||
|
||||
pinfo->gpios[i] = gpio;
|
||||
pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW;
|
||||
|
||||
ret = gpio_direction_output(pinfo->gpios[i],
|
||||
pinfo->alow_flags[i]);
|
||||
if (ret) {
|
||||
dev_err(dev,
|
||||
"can't set output direction for gpio #%d: %d\n",
|
||||
i, ret);
|
||||
goto err_loop;
|
||||
}
|
||||
}
|
||||
|
||||
#if IS_ENABLED(CONFIG_FSL_SOC)
|
||||
if (spisel_boot) {
|
||||
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
|
||||
if (!pinfo->immr_spi_cs) {
|
||||
ret = -ENOMEM;
|
||||
i = ngpios - 1;
|
||||
goto err_loop;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
pdata->max_chipselect = ngpios + spisel_boot;
|
||||
pdata->cs_control = fsl_spi_cs_control;
|
||||
|
||||
return 0;
|
||||
|
||||
err_loop:
|
||||
while (i >= 0) {
|
||||
if (gpio_is_valid(pinfo->gpios[i]))
|
||||
gpio_free(pinfo->gpios[i]);
|
||||
i--;
|
||||
}
|
||||
|
||||
kfree(pinfo->alow_flags);
|
||||
pinfo->alow_flags = NULL;
|
||||
err_alloc_flags:
|
||||
kfree(pinfo->gpios);
|
||||
pinfo->gpios = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int of_fsl_spi_free_chipselects(struct device *dev)
|
||||
{
|
||||
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
|
||||
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
|
||||
int i;
|
||||
|
||||
if (!pinfo->gpios)
|
||||
return 0;
|
||||
|
||||
for (i = 0; i < pdata->max_chipselect; i++) {
|
||||
if (gpio_is_valid(pinfo->gpios[i]))
|
||||
gpio_free(pinfo->gpios[i]);
|
||||
}
|
||||
|
||||
kfree(pinfo->gpios);
|
||||
kfree(pinfo->alow_flags);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int of_fsl_spi_probe(struct platform_device *ofdev)
|
||||
{
|
||||
struct device *dev = &ofdev->dev;
|
||||
|
@@ -866,9 +715,21 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
|
|||
|
||||
type = fsl_spi_get_type(&ofdev->dev);
|
||||
if (type == TYPE_FSL) {
|
||||
ret = of_fsl_spi_get_chipselects(dev);
|
||||
if (ret)
|
||||
goto err;
|
||||
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
|
||||
#if IS_ENABLED(CONFIG_FSL_SOC)
|
||||
struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
|
||||
bool spisel_boot = of_property_read_bool(np, "fsl,spisel_boot");
|
||||
|
||||
if (spisel_boot) {
|
||||
pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4);
|
||||
if (!pinfo->immr_spi_cs) {
|
||||
ret = -ENOMEM;
|
||||
goto err;
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
pdata->cs_control = fsl_spi_cs_control;
|
||||
}
|
||||
|
||||
ret = of_address_to_resource(np, 0, &mem);
|
||||
|
@@ -891,8 +752,6 @@ static int of_fsl_spi_probe(struct platform_device *ofdev)
|
|||
|
||||
err:
|
||||
irq_dispose_mapping(irq);
|
||||
if (type == TYPE_FSL)
|
||||
of_fsl_spi_free_chipselects(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -902,8 +761,6 @@ static int of_fsl_spi_remove(struct platform_device *ofdev)
|
|||
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
|
||||
|
||||
fsl_spi_cpm_free(mpc8xxx_spi);
|
||||
if (mpc8xxx_spi->type == TYPE_FSL)
|
||||
of_fsl_spi_free_chipselects(&ofdev->dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@@ -534,18 +534,14 @@ static int spi_geni_probe(struct platform_device *pdev)
|
|||
int ret, irq;
|
||||
struct spi_master *spi;
|
||||
struct spi_geni_master *mas;
|
||||
struct resource *res;
|
||||
void __iomem *base;
|
||||
struct clk *clk;
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(&pdev->dev, "Err getting IRQ %d\n", irq);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
base = devm_ioremap_resource(&pdev->dev, res);
|
||||
base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(base))
|
||||
return PTR_ERR(base);
|
||||
|
||||
|
|
|
@@ -290,10 +290,7 @@ static int spi_gpio_request(struct device *dev, struct spi_gpio *spi_gpio)
|
|||
return PTR_ERR(spi_gpio->miso);
|
||||
|
||||
spi_gpio->sck = devm_gpiod_get(dev, "sck", GPIOD_OUT_LOW);
|
||||
if (IS_ERR(spi_gpio->sck))
|
||||
return PTR_ERR(spi_gpio->sck);
|
||||
|
||||
return 0;
|
||||
return PTR_ERR_OR_ZERO(spi_gpio->sck);
|
||||
}
|
||||
|
||||
#ifdef CONFIG_OF
|
||||
|
|
|
@@ -819,22 +819,16 @@ static int lantiq_ssc_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
rx_irq = platform_get_irq_byname(pdev, LTQ_SPI_RX_IRQ_NAME);
|
||||
if (rx_irq < 0) {
|
||||
dev_err(dev, "failed to get %s\n", LTQ_SPI_RX_IRQ_NAME);
|
||||
if (rx_irq < 0)
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
tx_irq = platform_get_irq_byname(pdev, LTQ_SPI_TX_IRQ_NAME);
|
||||
if (tx_irq < 0) {
|
||||
dev_err(dev, "failed to get %s\n", LTQ_SPI_TX_IRQ_NAME);
|
||||
if (tx_irq < 0)
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
err_irq = platform_get_irq_byname(pdev, LTQ_SPI_ERR_IRQ_NAME);
|
||||
if (err_irq < 0) {
|
||||
dev_err(dev, "failed to get %s\n", LTQ_SPI_ERR_IRQ_NAME);
|
||||
if (err_irq < 0)
|
||||
return -ENXIO;
|
||||
}
|
||||
|
||||
master = spi_alloc_master(dev, sizeof(struct lantiq_ssc_spi));
|
||||
if (!master)
|
||||
|
|
|
@@ -185,7 +185,6 @@ spi_lp8841_rtc_probe(struct platform_device *pdev)
|
|||
int ret;
|
||||
struct spi_master *master;
|
||||
struct spi_lp8841_rtc *data;
|
||||
void *iomem;
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(*data));
|
||||
if (!master)
|
||||
|
@@ -207,8 +206,7 @@ spi_lp8841_rtc_probe(struct platform_device *pdev)
|
|||
|
||||
data = spi_master_get_devdata(master);
|
||||
|
||||
iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
data->iomem = devm_ioremap_resource(&pdev->dev, iomem);
|
||||
data->iomem = devm_platform_ioremap_resource(pdev, 0);
|
||||
ret = PTR_ERR_OR_ZERO(data->iomem);
|
||||
if (ret) {
|
||||
dev_err(&pdev->dev, "failed to get IO address\n");
|
||||
|
|
|
@@ -503,7 +503,6 @@ static int meson_spicc_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct spi_master *master;
|
||||
struct meson_spicc_device *spicc;
|
||||
struct resource *res;
|
||||
int ret, irq, rate;
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(*spicc));
|
||||
|
@@ -517,8 +516,7 @@ static int meson_spicc_probe(struct platform_device *pdev)
|
|||
spicc->pdev = pdev;
|
||||
platform_set_drvdata(pdev, spicc);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
spicc->base = devm_ioremap_resource(&pdev->dev, res);
|
||||
spicc->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(spicc->base)) {
|
||||
dev_err(&pdev->dev, "io resource mapping failed\n");
|
||||
ret = PTR_ERR(spicc->base);
|
||||
|
|
|
@@ -286,7 +286,6 @@ static int meson_spifc_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct spi_master *master;
|
||||
struct meson_spifc *spifc;
|
||||
struct resource *res;
|
||||
void __iomem *base;
|
||||
unsigned int rate;
|
||||
int ret = 0;
|
||||
|
@@ -300,8 +299,7 @@ static int meson_spifc_probe(struct platform_device *pdev)
|
|||
spifc = spi_master_get_devdata(master);
|
||||
spifc->dev = &pdev->dev;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
base = devm_ioremap_resource(spifc->dev, res);
|
||||
base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(base)) {
|
||||
ret = PTR_ERR(base);
|
||||
goto out_err;
|
||||
|
|
|
@@ -17,6 +17,7 @@
|
|||
#include <linux/platform_data/spi-mt65xx.h>
|
||||
#include <linux/pm_runtime.h>
|
||||
#include <linux/spi/spi.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
|
||||
#define SPI_CFG0_REG 0x0000
|
||||
#define SPI_CFG1_REG 0x0004
|
||||
|
@@ -28,6 +29,8 @@
|
|||
#define SPI_STATUS0_REG 0x001c
|
||||
#define SPI_PAD_SEL_REG 0x0024
|
||||
#define SPI_CFG2_REG 0x0028
|
||||
#define SPI_TX_SRC_REG_64 0x002c
|
||||
#define SPI_RX_DST_REG_64 0x0030
|
||||
|
||||
#define SPI_CFG0_SCK_HIGH_OFFSET 0
|
||||
#define SPI_CFG0_SCK_LOW_OFFSET 8
|
||||
|
@@ -73,6 +76,10 @@
|
|||
|
||||
#define MTK_SPI_MAX_FIFO_SIZE 32U
|
||||
#define MTK_SPI_PACKET_SIZE 1024
|
||||
#define MTK_SPI_32BITS_MASK (0xffffffff)
|
||||
|
||||
#define DMA_ADDR_EXT_BITS (36)
|
||||
#define DMA_ADDR_DEF_BITS (32)
|
||||
|
||||
struct mtk_spi_compatible {
|
||||
bool need_pad_sel;
|
||||
|
@@ -80,6 +87,8 @@ struct mtk_spi_compatible {
|
|||
bool must_tx;
|
||||
/* some IC design adjust cfg register to enhance time accuracy */
|
||||
bool enhance_timing;
|
||||
/* some IC support DMA addr extension */
|
||||
bool dma_ext;
|
||||
};
|
||||
|
||||
struct mtk_spi {
|
||||
|
@@ -102,6 +111,13 @@ static const struct mtk_spi_compatible mt2712_compat = {
|
|||
.must_tx = true,
|
||||
};
|
||||
|
||||
static const struct mtk_spi_compatible mt6765_compat = {
|
||||
.need_pad_sel = true,
|
||||
.must_tx = true,
|
||||
.enhance_timing = true,
|
||||
.dma_ext = true,
|
||||
};
|
||||
|
||||
static const struct mtk_spi_compatible mt7622_compat = {
|
||||
.must_tx = true,
|
||||
.enhance_timing = true,
|
||||
|
@@ -137,6 +153,9 @@ static const struct of_device_id mtk_spi_of_match[] = {
|
|||
{ .compatible = "mediatek,mt6589-spi",
|
||||
.data = (void *)&mtk_common_compat,
|
||||
},
|
||||
{ .compatible = "mediatek,mt6765-spi",
|
||||
.data = (void *)&mt6765_compat,
|
||||
},
|
||||
{ .compatible = "mediatek,mt7622-spi",
|
||||
.data = (void *)&mt7622_compat,
|
||||
},
|
||||
|
@@ -371,10 +390,25 @@ static void mtk_spi_setup_dma_addr(struct spi_master *master,
|
|||
{
|
||||
struct mtk_spi *mdata = spi_master_get_devdata(master);
|
||||
|
||||
if (mdata->tx_sgl)
|
||||
writel(xfer->tx_dma, mdata->base + SPI_TX_SRC_REG);
|
||||
if (mdata->rx_sgl)
|
||||
writel(xfer->rx_dma, mdata->base + SPI_RX_DST_REG);
|
||||
if (mdata->tx_sgl) {
|
||||
writel((u32)(xfer->tx_dma & MTK_SPI_32BITS_MASK),
|
||||
mdata->base + SPI_TX_SRC_REG);
|
||||
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
||||
if (mdata->dev_comp->dma_ext)
|
||||
writel((u32)(xfer->tx_dma >> 32),
|
||||
mdata->base + SPI_TX_SRC_REG_64);
|
||||
#endif
|
||||
}
|
||||
|
||||
if (mdata->rx_sgl) {
|
||||
writel((u32)(xfer->rx_dma & MTK_SPI_32BITS_MASK),
|
||||
mdata->base + SPI_RX_DST_REG);
|
||||
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
|
||||
if (mdata->dev_comp->dma_ext)
|
||||
writel((u32)(xfer->rx_dma >> 32),
|
||||
mdata->base + SPI_RX_DST_REG_64);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
|
||||
static int mtk_spi_fifo_transfer(struct spi_master *master,
|
||||
|
@@ -586,7 +620,7 @@ static int mtk_spi_probe(struct platform_device *pdev)
|
|||
struct mtk_spi *mdata;
|
||||
const struct of_device_id *of_id;
|
||||
struct resource *res;
|
||||
int i, irq, ret;
|
||||
int i, irq, ret, addr_bits;
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(*mdata));
|
||||
if (!master) {
|
||||
|
@@ -664,7 +698,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
|
|||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
|
||||
ret = irq;
|
||||
goto err_put_master;
|
||||
}
|
||||
|
@@ -753,6 +786,15 @@ static int mtk_spi_probe(struct platform_device *pdev)
|
|||
}
|
||||
}
|
||||
|
||||
if (mdata->dev_comp->dma_ext)
|
||||
addr_bits = DMA_ADDR_EXT_BITS;
|
||||
else
|
||||
addr_bits = DMA_ADDR_DEF_BITS;
|
||||
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(addr_bits));
|
||||
if (ret)
|
||||
dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
|
||||
addr_bits, ret);
|
||||
|
||||
return 0;
|
||||
|
||||
err_disable_runtime_pm:
|
||||
|
|
|
@@ -327,7 +327,6 @@ static int mt7621_spi_probe(struct platform_device *pdev)
|
|||
struct spi_controller *master;
|
||||
struct mt7621_spi *rs;
|
||||
void __iomem *base;
|
||||
struct resource *r;
|
||||
int status = 0;
|
||||
struct clk *clk;
|
||||
int ret;
|
||||
|
@@ -336,8 +335,7 @@ static int mt7621_spi_probe(struct platform_device *pdev)
|
|||
if (!match)
|
||||
return -EINVAL;
|
||||
|
||||
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
base = devm_ioremap_resource(&pdev->dev, r);
|
||||
base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(base))
|
||||
return PTR_ERR(base);
|
||||
|
||||
|
|
|
@@ -532,7 +532,6 @@ static int mxs_spi_probe(struct platform_device *pdev)
|
|||
struct spi_master *master;
|
||||
struct mxs_spi *spi;
|
||||
struct mxs_ssp *ssp;
|
||||
struct resource *iores;
|
||||
struct clk *clk;
|
||||
void __iomem *base;
|
||||
int devid, clk_freq;
|
||||
|
@@ -545,12 +544,11 @@ static int mxs_spi_probe(struct platform_device *pdev)
|
|||
*/
|
||||
const int clk_freq_default = 160000000;
|
||||
|
||||
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
irq_err = platform_get_irq(pdev, 0);
|
||||
if (irq_err < 0)
|
||||
return irq_err;
|
||||
|
||||
base = devm_ioremap_resource(&pdev->dev, iores);
|
||||
base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(base))
|
||||
return PTR_ERR(base);
|
||||
|
||||
|
|
769
drivers/spi/spi-npcm-fiu.c
Normal file
|
@@ -0,0 +1,769 @@
|
|||
// SPDX-License-Identifier: GPL-2.0
|
||||
// Copyright (c) 2019 Nuvoton Technology corporation.
|
||||
|
||||
#include <linux/init.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/ioport.h>
|
||||
#include <linux/clk.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/io.h>
|
||||
#include <linux/vmalloc.h>
|
||||
#include <linux/regmap.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/spi/spi-mem.h>
|
||||
#include <linux/mfd/syscon.h>
|
||||
|
||||
/* NPCM7xx GCR module */
|
||||
#define NPCM7XX_INTCR3_OFFSET 0x9C
|
||||
#define NPCM7XX_INTCR3_FIU_FIX BIT(6)
|
||||
|
||||
/* Flash Interface Unit (FIU) Registers */
|
||||
#define NPCM_FIU_DRD_CFG 0x00
|
||||
#define NPCM_FIU_DWR_CFG 0x04
|
||||
#define NPCM_FIU_UMA_CFG 0x08
|
||||
#define NPCM_FIU_UMA_CTS 0x0C
|
||||
#define NPCM_FIU_UMA_CMD 0x10
|
||||
#define NPCM_FIU_UMA_ADDR 0x14
|
||||
#define NPCM_FIU_PRT_CFG 0x18
|
||||
#define NPCM_FIU_UMA_DW0 0x20
|
||||
#define NPCM_FIU_UMA_DW1 0x24
|
||||
#define NPCM_FIU_UMA_DW2 0x28
|
||||
#define NPCM_FIU_UMA_DW3 0x2C
|
||||
#define NPCM_FIU_UMA_DR0 0x30
|
||||
#define NPCM_FIU_UMA_DR1 0x34
|
||||
#define NPCM_FIU_UMA_DR2 0x38
|
||||
#define NPCM_FIU_UMA_DR3 0x3C
|
||||
#define NPCM_FIU_MAX_REG_LIMIT 0x80
|
||||
|
||||
/* FIU Direct Read Configuration Register */
|
||||
#define NPCM_FIU_DRD_CFG_LCK BIT(31)
|
||||
#define NPCM_FIU_DRD_CFG_R_BURST GENMASK(25, 24)
|
||||
#define NPCM_FIU_DRD_CFG_ADDSIZ GENMASK(17, 16)
|
||||
#define NPCM_FIU_DRD_CFG_DBW GENMASK(13, 12)
|
||||
#define NPCM_FIU_DRD_CFG_ACCTYPE GENMASK(9, 8)
|
||||
#define NPCM_FIU_DRD_CFG_RDCMD GENMASK(7, 0)
|
||||
#define NPCM_FIU_DRD_ADDSIZ_SHIFT 16
|
||||
#define NPCM_FIU_DRD_DBW_SHIFT 12
|
||||
#define NPCM_FIU_DRD_ACCTYPE_SHIFT 8
|
||||
|
||||
/* FIU Direct Write Configuration Register */
|
||||
#define NPCM_FIU_DWR_CFG_LCK BIT(31)
|
||||
#define NPCM_FIU_DWR_CFG_W_BURST GENMASK(25, 24)
|
||||
#define NPCM_FIU_DWR_CFG_ADDSIZ GENMASK(17, 16)
|
||||
#define NPCM_FIU_DWR_CFG_ABPCK GENMASK(11, 10)
|
||||
#define NPCM_FIU_DWR_CFG_DBPCK GENMASK(9, 8)
|
||||
#define NPCM_FIU_DWR_CFG_WRCMD GENMASK(7, 0)
|
||||
#define NPCM_FIU_DWR_ADDSIZ_SHIFT 16
|
||||
#define NPCM_FIU_DWR_ABPCK_SHIFT 10
|
||||
#define NPCM_FIU_DWR_DBPCK_SHIFT 8
|
||||
|
||||
/* FIU UMA Configuration Register */
|
||||
#define NPCM_FIU_UMA_CFG_LCK BIT(31)
|
||||
#define NPCM_FIU_UMA_CFG_CMMLCK BIT(30)
|
||||
#define NPCM_FIU_UMA_CFG_RDATSIZ GENMASK(28, 24)
|
||||
#define NPCM_FIU_UMA_CFG_DBSIZ GENMASK(23, 21)
|
||||
#define NPCM_FIU_UMA_CFG_WDATSIZ GENMASK(20, 16)
|
||||
#define NPCM_FIU_UMA_CFG_ADDSIZ GENMASK(13, 11)
|
||||
#define NPCM_FIU_UMA_CFG_CMDSIZ BIT(10)
|
||||
#define NPCM_FIU_UMA_CFG_RDBPCK GENMASK(9, 8)
|
||||
#define NPCM_FIU_UMA_CFG_DBPCK GENMASK(7, 6)
|
||||
#define NPCM_FIU_UMA_CFG_WDBPCK GENMASK(5, 4)
|
||||
#define NPCM_FIU_UMA_CFG_ADBPCK GENMASK(3, 2)
|
||||
#define NPCM_FIU_UMA_CFG_CMBPCK GENMASK(1, 0)
|
||||
#define NPCM_FIU_UMA_CFG_ADBPCK_SHIFT 2
|
||||
#define NPCM_FIU_UMA_CFG_WDBPCK_SHIFT 4
|
||||
#define NPCM_FIU_UMA_CFG_DBPCK_SHIFT 6
|
||||
#define NPCM_FIU_UMA_CFG_RDBPCK_SHIFT 8
|
||||
#define NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT 11
|
||||
#define NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT 16
|
||||
#define NPCM_FIU_UMA_CFG_DBSIZ_SHIFT 21
|
||||
#define NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT 24
|
||||
|
||||
/* FIU UMA Control and Status Register */
|
||||
#define NPCM_FIU_UMA_CTS_RDYIE BIT(25)
|
||||
#define NPCM_FIU_UMA_CTS_RDYST BIT(24)
|
||||
#define NPCM_FIU_UMA_CTS_SW_CS BIT(16)
|
||||
#define NPCM_FIU_UMA_CTS_DEV_NUM GENMASK(9, 8)
|
||||
#define NPCM_FIU_UMA_CTS_EXEC_DONE BIT(0)
|
||||
#define NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT 8
|
||||
|
||||
/* FIU UMA Command Register */
|
||||
#define NPCM_FIU_UMA_CMD_DUM3 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_CMD_DUM2 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_CMD_DUM1 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_CMD_CMD GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Address Register */
|
||||
#define NPCM_FIU_UMA_ADDR_UMA_ADDR GENMASK(31, 0)
|
||||
#define NPCM_FIU_UMA_ADDR_AB3 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_ADDR_AB2 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_ADDR_AB1 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_ADDR_AB0 GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Write Data Bytes 0-3 Register */
|
||||
#define NPCM_FIU_UMA_DW0_WB3 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_DW0_WB2 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_DW0_WB1 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_DW0_WB0 GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Write Data Bytes 4-7 Register */
|
||||
#define NPCM_FIU_UMA_DW1_WB7 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_DW1_WB6 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_DW1_WB5 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_DW1_WB4 GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Write Data Bytes 8-11 Register */
|
||||
#define NPCM_FIU_UMA_DW2_WB11 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_DW2_WB10 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_DW2_WB9 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_DW2_WB8 GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Write Data Bytes 12-15 Register */
|
||||
#define NPCM_FIU_UMA_DW3_WB15 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_DW3_WB14 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_DW3_WB13 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_DW3_WB12 GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Read Data Bytes 0-3 Register */
|
||||
#define NPCM_FIU_UMA_DR0_RB3 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_DR0_RB2 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_DR0_RB1 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_DR0_RB0 GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Read Data Bytes 4-7 Register */
|
||||
#define NPCM_FIU_UMA_DR1_RB15 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_DR1_RB14 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_DR1_RB13 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_DR1_RB12 GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Read Data Bytes 8-11 Register */
|
||||
#define NPCM_FIU_UMA_DR2_RB15 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_DR2_RB14 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_DR2_RB13 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_DR2_RB12 GENMASK(7, 0)
|
||||
|
||||
/* FIU UMA Read Data Bytes 12-15 Register */
|
||||
#define NPCM_FIU_UMA_DR3_RB15 GENMASK(31, 24)
|
||||
#define NPCM_FIU_UMA_DR3_RB14 GENMASK(23, 16)
|
||||
#define NPCM_FIU_UMA_DR3_RB13 GENMASK(15, 8)
|
||||
#define NPCM_FIU_UMA_DR3_RB12 GENMASK(7, 0)
|
||||
|
||||
/* FIU Read Mode */
|
||||
enum {
|
||||
DRD_SINGLE_WIRE_MODE = 0,
|
||||
DRD_DUAL_IO_MODE = 1,
|
||||
DRD_QUAD_IO_MODE = 2,
|
||||
DRD_SPI_X_MODE = 3,
|
||||
};
|
||||
|
||||
enum {
|
||||
DWR_ABPCK_BIT_PER_CLK = 0,
|
||||
DWR_ABPCK_2_BIT_PER_CLK = 1,
|
||||
DWR_ABPCK_4_BIT_PER_CLK = 2,
|
||||
};
|
||||
|
||||
enum {
|
||||
DWR_DBPCK_BIT_PER_CLK = 0,
|
||||
DWR_DBPCK_2_BIT_PER_CLK = 1,
|
||||
DWR_DBPCK_4_BIT_PER_CLK = 2,
|
||||
};
|
||||
|
||||
#define NPCM_FIU_DRD_16_BYTE_BURST 0x3000000
|
||||
#define NPCM_FIU_DWR_16_BYTE_BURST 0x3000000
|
||||
|
||||
#define MAP_SIZE_128MB 0x8000000
|
||||
#define MAP_SIZE_16MB 0x1000000
|
||||
#define MAP_SIZE_8MB 0x800000
|
||||
|
||||
#define NUM_BITS_IN_BYTE 8
|
||||
#define FIU_DRD_MAX_DUMMY_NUMBER 3
|
||||
#define NPCM_MAX_CHIP_NUM 4
|
||||
#define CHUNK_SIZE 16
|
||||
#define UMA_MICRO_SEC_TIMEOUT 150
|
||||
|
||||
enum {
|
||||
FIU0 = 0,
|
||||
FIU3,
|
||||
FIUX,
|
||||
};
|
||||
|
||||
struct npcm_fiu_info {
|
||||
char *name;
|
||||
u32 fiu_id;
|
||||
u32 max_map_size;
|
||||
u32 max_cs;
|
||||
};
|
||||
|
||||
struct fiu_data {
|
||||
const struct npcm_fiu_info *npcm_fiu_data_info;
|
||||
int fiu_max;
|
||||
};
|
||||
|
||||
static const struct npcm_fiu_info npxm7xx_fiu_info[] = {
|
||||
{.name = "FIU0", .fiu_id = FIU0,
|
||||
.max_map_size = MAP_SIZE_128MB, .max_cs = 2},
|
||||
{.name = "FIU3", .fiu_id = FIU3,
|
||||
.max_map_size = MAP_SIZE_128MB, .max_cs = 4},
|
||||
{.name = "FIUX", .fiu_id = FIUX,
|
||||
.max_map_size = MAP_SIZE_16MB, .max_cs = 2} };
|
||||
|
||||
static const struct fiu_data npxm7xx_fiu_data = {
|
||||
.npcm_fiu_data_info = npxm7xx_fiu_info,
|
||||
.fiu_max = 3,
|
||||
};
|
||||
|
||||
struct npcm_fiu_spi;
|
||||
|
||||
struct npcm_fiu_chip {
|
||||
void __iomem *flash_region_mapped_ptr;
|
||||
struct npcm_fiu_spi *fiu;
|
||||
unsigned long clkrate;
|
||||
u32 chipselect;
|
||||
};
|
||||
|
||||
struct npcm_fiu_spi {
|
||||
struct npcm_fiu_chip chip[NPCM_MAX_CHIP_NUM];
|
||||
const struct npcm_fiu_info *info;
|
||||
struct spi_mem_op drd_op;
|
||||
struct resource *res_mem;
|
||||
struct regmap *regmap;
|
||||
unsigned long clkrate;
|
||||
struct device *dev;
|
||||
struct clk *clk;
|
||||
bool spix_mode;
|
||||
};
|
||||
|
||||
static const struct regmap_config npcm_mtd_regmap_config = {
|
||||
.reg_bits = 32,
|
||||
.val_bits = 32,
|
||||
.reg_stride = 4,
|
||||
.max_register = NPCM_FIU_MAX_REG_LIMIT,
|
||||
};
|
||||
|
||||
static void npcm_fiu_set_drd(struct npcm_fiu_spi *fiu,
|
||||
const struct spi_mem_op *op)
|
||||
{
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
|
||||
NPCM_FIU_DRD_CFG_ACCTYPE,
|
||||
ilog2(op->addr.buswidth) <<
|
||||
NPCM_FIU_DRD_ACCTYPE_SHIFT);
|
||||
fiu->drd_op.addr.buswidth = op->addr.buswidth;
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
|
||||
NPCM_FIU_DRD_CFG_DBW,
|
||||
((op->dummy.nbytes * ilog2(op->addr.buswidth))
|
||||
/ NUM_BITS_IN_BYTE) << NPCM_FIU_DRD_DBW_SHIFT);
|
||||
fiu->drd_op.dummy.nbytes = op->dummy.nbytes;
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
|
||||
NPCM_FIU_DRD_CFG_RDCMD, op->cmd.opcode);
|
||||
fiu->drd_op.cmd.opcode = op->cmd.opcode;
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
|
||||
NPCM_FIU_DRD_CFG_ADDSIZ,
|
||||
(op->addr.nbytes - 3) << NPCM_FIU_DRD_ADDSIZ_SHIFT);
|
||||
fiu->drd_op.addr.nbytes = op->addr.nbytes;
|
||||
}
|
||||
|
||||
static ssize_t npcm_fiu_direct_read(struct spi_mem_dirmap_desc *desc,
|
||||
u64 offs, size_t len, void *buf)
|
||||
{
|
||||
struct npcm_fiu_spi *fiu =
|
||||
spi_controller_get_devdata(desc->mem->spi->master);
|
||||
struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
|
||||
void __iomem *src = (void __iomem *)(chip->flash_region_mapped_ptr +
|
||||
offs);
|
||||
u8 *buf_rx = buf;
|
||||
u32 i;
|
||||
|
||||
if (fiu->spix_mode) {
|
||||
for (i = 0 ; i < len ; i++)
|
||||
*(buf_rx + i) = ioread8(src + i);
|
||||
} else {
|
||||
if (desc->info.op_tmpl.addr.buswidth != fiu->drd_op.addr.buswidth ||
|
||||
desc->info.op_tmpl.dummy.nbytes != fiu->drd_op.dummy.nbytes ||
|
||||
desc->info.op_tmpl.cmd.opcode != fiu->drd_op.cmd.opcode ||
|
||||
desc->info.op_tmpl.addr.nbytes != fiu->drd_op.addr.nbytes)
|
||||
npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
|
||||
|
||||
memcpy_fromio(buf_rx, src, len);
|
||||
}
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static ssize_t npcm_fiu_direct_write(struct spi_mem_dirmap_desc *desc,
|
||||
u64 offs, size_t len, const void *buf)
|
||||
{
|
||||
struct npcm_fiu_spi *fiu =
|
||||
spi_controller_get_devdata(desc->mem->spi->master);
|
||||
struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
|
||||
void __iomem *dst = (void __iomem *)(chip->flash_region_mapped_ptr +
|
||||
offs);
|
||||
const u8 *buf_tx = buf;
|
||||
u32 i;
|
||||
|
||||
if (fiu->spix_mode)
|
||||
for (i = 0 ; i < len ; i++)
|
||||
iowrite8(*(buf_tx + i), dst + i);
|
||||
else
|
||||
memcpy_toio(dst, buf_tx, len);
|
||||
|
||||
return len;
|
||||
}
|
||||
|
||||
static int npcm_fiu_uma_read(struct spi_mem *mem,
|
||||
const struct spi_mem_op *op, u32 addr,
|
||||
bool is_address_size, u8 *data, u32 data_size)
|
||||
{
|
||||
struct npcm_fiu_spi *fiu =
|
||||
spi_controller_get_devdata(mem->spi->master);
|
||||
u32 uma_cfg = BIT(10);
|
||||
u32 data_reg[4];
|
||||
int ret;
|
||||
u32 val;
|
||||
u32 i;
|
||||
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
|
||||
NPCM_FIU_UMA_CTS_DEV_NUM,
|
||||
(mem->spi->chip_select <<
|
||||
NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
|
||||
NPCM_FIU_UMA_CMD_CMD, op->cmd.opcode);
|
||||
|
||||
if (is_address_size) {
|
||||
uma_cfg |= ilog2(op->cmd.buswidth);
|
||||
uma_cfg |= ilog2(op->addr.buswidth)
|
||||
<< NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
|
||||
uma_cfg |= ilog2(op->dummy.buswidth)
|
||||
<< NPCM_FIU_UMA_CFG_DBPCK_SHIFT;
|
||||
uma_cfg |= ilog2(op->data.buswidth)
|
||||
<< NPCM_FIU_UMA_CFG_RDBPCK_SHIFT;
|
||||
uma_cfg |= op->dummy.nbytes << NPCM_FIU_UMA_CFG_DBSIZ_SHIFT;
|
||||
uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
|
||||
regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, addr);
|
||||
} else {
|
||||
regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
|
||||
}
|
||||
|
||||
uma_cfg |= data_size << NPCM_FIU_UMA_CFG_RDATSIZ_SHIFT;
|
||||
regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
|
||||
regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
|
||||
NPCM_FIU_UMA_CTS_EXEC_DONE,
|
||||
NPCM_FIU_UMA_CTS_EXEC_DONE);
|
||||
ret = regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
|
||||
(!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
|
||||
UMA_MICRO_SEC_TIMEOUT);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (data_size) {
|
||||
for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
|
||||
regmap_read(fiu->regmap, NPCM_FIU_UMA_DR0 + (i * 4),
|
||||
&data_reg[i]);
|
||||
memcpy(data, data_reg, data_size);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int npcm_fiu_uma_write(struct spi_mem *mem,
|
||||
const struct spi_mem_op *op, u8 cmd,
|
||||
bool is_address_size, u8 *data, u32 data_size)
|
||||
{
|
||||
struct npcm_fiu_spi *fiu =
|
||||
spi_controller_get_devdata(mem->spi->master);
|
||||
u32 uma_cfg = BIT(10);
|
||||
u32 data_reg[4] = {0};
|
||||
u32 val;
|
||||
u32 i;
|
||||
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
|
||||
NPCM_FIU_UMA_CTS_DEV_NUM,
|
||||
(mem->spi->chip_select <<
|
||||
NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
|
||||
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CMD,
|
||||
NPCM_FIU_UMA_CMD_CMD, cmd);
|
||||
|
||||
if (data_size) {
|
||||
memcpy(data_reg, data, data_size);
|
||||
for (i = 0; i < DIV_ROUND_UP(data_size, 4); i++)
|
||||
regmap_write(fiu->regmap, NPCM_FIU_UMA_DW0 + (i * 4),
|
||||
data_reg[i]);
|
||||
}
|
||||
|
||||
if (is_address_size) {
|
||||
uma_cfg |= ilog2(op->cmd.buswidth);
|
||||
uma_cfg |= ilog2(op->addr.buswidth) <<
|
||||
NPCM_FIU_UMA_CFG_ADBPCK_SHIFT;
|
||||
uma_cfg |= ilog2(op->data.buswidth) <<
|
||||
NPCM_FIU_UMA_CFG_WDBPCK_SHIFT;
|
||||
uma_cfg |= op->addr.nbytes << NPCM_FIU_UMA_CFG_ADDSIZ_SHIFT;
|
||||
regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, op->addr.val);
|
||||
} else {
|
||||
regmap_write(fiu->regmap, NPCM_FIU_UMA_ADDR, 0x0);
|
||||
}
|
||||
|
||||
uma_cfg |= (data_size << NPCM_FIU_UMA_CFG_WDATSIZ_SHIFT);
|
||||
regmap_write(fiu->regmap, NPCM_FIU_UMA_CFG, uma_cfg);
|
||||
|
||||
regmap_write_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
|
||||
NPCM_FIU_UMA_CTS_EXEC_DONE,
|
||||
NPCM_FIU_UMA_CTS_EXEC_DONE);
|
||||
|
||||
return regmap_read_poll_timeout(fiu->regmap, NPCM_FIU_UMA_CTS, val,
|
||||
(!(val & NPCM_FIU_UMA_CTS_EXEC_DONE)), 0,
|
||||
UMA_MICRO_SEC_TIMEOUT);
|
||||
}
|
||||
|
||||
static int npcm_fiu_manualwrite(struct spi_mem *mem,
|
||||
const struct spi_mem_op *op)
|
||||
{
|
||||
struct npcm_fiu_spi *fiu =
|
||||
spi_controller_get_devdata(mem->spi->master);
|
||||
u8 *data = (u8 *)op->data.buf.out;
|
||||
u32 num_data_chunks;
|
||||
u32 remain_data;
|
||||
u32 idx = 0;
|
||||
int ret;
|
||||
|
||||
num_data_chunks = op->data.nbytes / CHUNK_SIZE;
|
||||
remain_data = op->data.nbytes % CHUNK_SIZE;
|
||||
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
|
||||
NPCM_FIU_UMA_CTS_DEV_NUM,
|
||||
(mem->spi->chip_select <<
|
||||
NPCM_FIU_UMA_CTS_DEV_NUM_SHIFT));
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
|
||||
NPCM_FIU_UMA_CTS_SW_CS, 0);
|
||||
|
||||
ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, true, NULL, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Starting the data writing loop in multiples of 8 */
|
||||
for (idx = 0; idx < num_data_chunks; ++idx) {
|
||||
ret = npcm_fiu_uma_write(mem, op, data[0], false,
|
||||
&data[1], CHUNK_SIZE - 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
data += CHUNK_SIZE;
|
||||
}
|
||||
|
||||
/* Handling chunk remains */
|
||||
if (remain_data > 0) {
|
||||
ret = npcm_fiu_uma_write(mem, op, data[0], false,
|
||||
&data[1], remain_data - 1);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_UMA_CTS,
|
||||
NPCM_FIU_UMA_CTS_SW_CS, NPCM_FIU_UMA_CTS_SW_CS);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int npcm_fiu_read(struct spi_mem *mem, const struct spi_mem_op *op)
|
||||
{
|
||||
u8 *data = op->data.buf.in;
|
||||
int i, readlen, currlen;
|
||||
u8 *buf_ptr;
|
||||
u32 addr;
|
||||
int ret;
|
||||
|
||||
i = 0;
|
||||
currlen = op->data.nbytes;
|
||||
|
||||
do {
|
||||
addr = ((u32)op->addr.val + i);
|
||||
if (currlen < 16)
|
||||
readlen = currlen;
|
||||
else
|
||||
readlen = 16;
|
||||
|
||||
buf_ptr = data + i;
|
||||
ret = npcm_fiu_uma_read(mem, op, addr, true, buf_ptr,
|
||||
readlen);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
i += readlen;
|
||||
currlen -= 16;
|
||||
} while (currlen > 0);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void npcm_fiux_set_direct_wr(struct npcm_fiu_spi *fiu)
|
||||
{
|
||||
regmap_write(fiu->regmap, NPCM_FIU_DWR_CFG,
|
||||
NPCM_FIU_DWR_16_BYTE_BURST);
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
|
||||
NPCM_FIU_DWR_CFG_ABPCK,
|
||||
DWR_ABPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_ABPCK_SHIFT);
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_DWR_CFG,
|
||||
NPCM_FIU_DWR_CFG_DBPCK,
|
||||
DWR_DBPCK_4_BIT_PER_CLK << NPCM_FIU_DWR_DBPCK_SHIFT);
|
||||
}
|
||||
|
||||
static void npcm_fiux_set_direct_rd(struct npcm_fiu_spi *fiu)
|
||||
{
|
||||
u32 rx_dummy = 0;
|
||||
|
||||
regmap_write(fiu->regmap, NPCM_FIU_DRD_CFG,
|
||||
NPCM_FIU_DRD_16_BYTE_BURST);
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
|
||||
NPCM_FIU_DRD_CFG_ACCTYPE,
|
||||
DRD_SPI_X_MODE << NPCM_FIU_DRD_ACCTYPE_SHIFT);
|
||||
regmap_update_bits(fiu->regmap, NPCM_FIU_DRD_CFG,
|
||||
NPCM_FIU_DRD_CFG_DBW,
|
||||
rx_dummy << NPCM_FIU_DRD_DBW_SHIFT);
|
||||
}
|
||||
|
||||
static int npcm_fiu_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
|
||||
{
|
||||
struct npcm_fiu_spi *fiu =
|
||||
spi_controller_get_devdata(mem->spi->master);
|
||||
struct npcm_fiu_chip *chip = &fiu->chip[mem->spi->chip_select];
|
||||
int ret = 0;
|
||||
u8 *buf;
|
||||
|
||||
dev_dbg(fiu->dev, "cmd:%#x mode:%d.%d.%d.%d addr:%#llx len:%#x\n",
|
||||
op->cmd.opcode, op->cmd.buswidth, op->addr.buswidth,
|
||||
op->dummy.buswidth, op->data.buswidth, op->addr.val,
|
||||
op->data.nbytes);
|
||||
|
||||
if (fiu->spix_mode || op->addr.nbytes > 4)
|
||||
return -ENOTSUPP;
|
||||
|
||||
if (fiu->clkrate != chip->clkrate) {
|
||||
ret = clk_set_rate(fiu->clk, chip->clkrate);
|
||||
if (ret < 0)
|
||||
dev_warn(fiu->dev, "Failed setting %lu frequency, stay at %lu frequency\n",
|
||||
chip->clkrate, fiu->clkrate);
|
||||
else
|
||||
fiu->clkrate = chip->clkrate;
|
||||
}
|
||||
|
||||
if (op->data.dir == SPI_MEM_DATA_IN) {
|
||||
if (!op->addr.nbytes) {
|
||||
buf = op->data.buf.in;
|
||||
ret = npcm_fiu_uma_read(mem, op, op->addr.val, false,
|
||||
buf, op->data.nbytes);
|
||||
} else {
|
||||
ret = npcm_fiu_read(mem, op);
|
||||
}
|
||||
} else {
|
||||
if (!op->addr.nbytes && !op->data.nbytes)
|
||||
ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
|
||||
NULL, 0);
|
||||
if (op->addr.nbytes && !op->data.nbytes) {
|
||||
int i;
|
||||
u8 buf_addr[4];
|
||||
u32 addr = op->addr.val;
|
||||
|
||||
for (i = op->addr.nbytes - 1; i >= 0; i--) {
|
||||
buf_addr[i] = addr & 0xff;
|
||||
addr >>= 8;
|
||||
}
|
||||
ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
|
||||
buf_addr, op->addr.nbytes);
|
||||
}
|
||||
if (!op->addr.nbytes && op->data.nbytes)
|
||||
ret = npcm_fiu_uma_write(mem, op, op->cmd.opcode, false,
|
||||
(u8 *)op->data.buf.out,
|
||||
op->data.nbytes);
|
||||
if (op->addr.nbytes && op->data.nbytes)
|
||||
ret = npcm_fiu_manualwrite(mem, op);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int npcm_fiu_dirmap_create(struct spi_mem_dirmap_desc *desc)
|
||||
{
|
||||
struct npcm_fiu_spi *fiu =
|
||||
spi_controller_get_devdata(desc->mem->spi->master);
|
||||
struct npcm_fiu_chip *chip = &fiu->chip[desc->mem->spi->chip_select];
|
||||
struct regmap *gcr_regmap;
|
||||
|
||||
if (!fiu->res_mem) {
|
||||
dev_warn(fiu->dev, "Reserved memory not defined, direct read disabled\n");
|
||||
desc->nodirmap = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!fiu->spix_mode &&
|
||||
desc->info.op_tmpl.data.dir == SPI_MEM_DATA_OUT) {
|
||||
desc->nodirmap = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!chip->flash_region_mapped_ptr) {
|
||||
chip->flash_region_mapped_ptr =
|
||||
devm_ioremap_nocache(fiu->dev, (fiu->res_mem->start +
|
||||
(fiu->info->max_map_size *
|
||||
desc->mem->spi->chip_select)),
|
||||
(u32)desc->info.length);
|
||||
if (!chip->flash_region_mapped_ptr) {
|
||||
dev_warn(fiu->dev, "Error mapping memory region, direct read disabled\n");
|
||||
desc->nodirmap = true;
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (of_device_is_compatible(fiu->dev->of_node, "nuvoton,npcm750-fiu")) {
|
||||
gcr_regmap =
|
||||
syscon_regmap_lookup_by_compatible("nuvoton,npcm750-gcr");
|
||||
if (IS_ERR(gcr_regmap)) {
|
||||
dev_warn(fiu->dev, "Didn't find nuvoton,npcm750-gcr, direct read disabled\n");
|
||||
desc->nodirmap = true;
|
||||
return 0;
|
||||
}
|
||||
regmap_update_bits(gcr_regmap, NPCM7XX_INTCR3_OFFSET,
|
||||
NPCM7XX_INTCR3_FIU_FIX,
|
||||
NPCM7XX_INTCR3_FIU_FIX);
|
||||
}
|
||||
|
||||
if (desc->info.op_tmpl.data.dir == SPI_MEM_DATA_IN) {
|
||||
if (!fiu->spix_mode)
|
||||
npcm_fiu_set_drd(fiu, &desc->info.op_tmpl);
|
||||
else
|
||||
npcm_fiux_set_direct_rd(fiu);
|
||||
|
||||
} else {
|
||||
npcm_fiux_set_direct_wr(fiu);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int npcm_fiu_setup(struct spi_device *spi)
|
||||
{
|
||||
struct spi_controller *ctrl = spi->master;
|
||||
struct npcm_fiu_spi *fiu = spi_controller_get_devdata(ctrl);
|
||||
struct npcm_fiu_chip *chip;
|
||||
|
||||
chip = &fiu->chip[spi->chip_select];
|
||||
chip->fiu = fiu;
|
||||
chip->chipselect = spi->chip_select;
|
||||
chip->clkrate = spi->max_speed_hz;
|
||||
|
||||
fiu->clkrate = clk_get_rate(fiu->clk);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct spi_controller_mem_ops npcm_fiu_mem_ops = {
|
||||
.exec_op = npcm_fiu_exec_op,
|
||||
.dirmap_create = npcm_fiu_dirmap_create,
|
||||
.dirmap_read = npcm_fiu_direct_read,
|
||||
.dirmap_write = npcm_fiu_direct_write,
|
||||
};
|
||||
|
||||
static const struct of_device_id npcm_fiu_dt_ids[] = {
|
||||
{ .compatible = "nuvoton,npcm750-fiu", .data = &npxm7xx_fiu_data },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
||||
static int npcm_fiu_probe(struct platform_device *pdev)
|
||||
{
|
||||
const struct fiu_data *fiu_data_match;
|
||||
const struct of_device_id *match;
|
||||
struct device *dev = &pdev->dev;
|
||||
struct spi_controller *ctrl;
|
||||
struct npcm_fiu_spi *fiu;
|
||||
void __iomem *regbase;
|
||||
struct resource *res;
|
||||
int ret;
|
||||
int id;
|
||||
|
||||
ctrl = spi_alloc_master(dev, sizeof(*fiu));
|
||||
if (!ctrl)
|
||||
return -ENOMEM;
|
||||
|
||||
fiu = spi_controller_get_devdata(ctrl);
|
||||
|
||||
match = of_match_device(npcm_fiu_dt_ids, dev);
|
||||
if (!match || !match->data) {
|
||||
dev_err(dev, "No compatible OF match\n");
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
fiu_data_match = match->data;
|
||||
id = of_alias_get_id(dev->of_node, "fiu");
|
||||
if (id < 0 || id >= fiu_data_match->fiu_max) {
|
||||
dev_err(dev, "Invalid platform device id: %d\n", id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
fiu->info = &fiu_data_match->npcm_fiu_data_info[id];
|
||||
|
||||
platform_set_drvdata(pdev, fiu);
|
||||
fiu->dev = dev;
|
||||
|
||||
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "control");
|
||||
regbase = devm_ioremap_resource(dev, res);
|
||||
if (IS_ERR(regbase))
|
||||
return PTR_ERR(regbase);
|
||||
|
||||
fiu->regmap = devm_regmap_init_mmio(dev, regbase,
|
||||
&npcm_mtd_regmap_config);
|
||||
if (IS_ERR(fiu->regmap)) {
|
||||
dev_err(dev, "Failed to create regmap\n");
|
||||
return PTR_ERR(fiu->regmap);
|
||||
}
|
||||
|
||||
fiu->res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
|
||||
"memory");
|
||||
fiu->clk = devm_clk_get(dev, NULL);
|
||||
if (IS_ERR(fiu->clk))
|
||||
return PTR_ERR(fiu->clk);
|
||||
|
||||
fiu->spix_mode = of_property_read_bool(dev->of_node,
|
||||
"nuvoton,spix-mode");
|
||||
|
||||
platform_set_drvdata(pdev, fiu);
|
||||
clk_prepare_enable(fiu->clk);
|
||||
|
||||
ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD
|
||||
| SPI_TX_DUAL | SPI_TX_QUAD;
|
||||
ctrl->setup = npcm_fiu_setup;
|
||||
ctrl->bus_num = -1;
|
||||
ctrl->mem_ops = &npcm_fiu_mem_ops;
|
||||
ctrl->num_chipselect = fiu->info->max_cs;
|
||||
ctrl->dev.of_node = dev->of_node;
|
||||
|
||||
ret = devm_spi_register_master(dev, ctrl);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int npcm_fiu_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct npcm_fiu_spi *fiu = platform_get_drvdata(pdev);
|
||||
|
||||
clk_disable_unprepare(fiu->clk);
|
||||
return 0;
|
||||
}
|
||||
|
||||
MODULE_DEVICE_TABLE(of, npcm_fiu_dt_ids);
|
||||
|
||||
static struct platform_driver npcm_fiu_driver = {
|
||||
.driver = {
|
||||
.name = "NPCM-FIU",
|
||||
.bus = &platform_bus_type,
|
||||
.of_match_table = npcm_fiu_dt_ids,
|
||||
},
|
||||
.probe = npcm_fiu_probe,
|
||||
.remove = npcm_fiu_remove,
|
||||
};
|
||||
module_platform_driver(npcm_fiu_driver);
|
||||
|
||||
MODULE_DESCRIPTION("Nuvoton FLASH Interface Unit SPI Controller Driver");
|
||||
MODULE_AUTHOR("Tomer Maimon <tomer.maimon@nuvoton.com>");
|
||||
MODULE_LICENSE("GPL v2");
|
|
@@ -341,7 +341,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct npcm_pspi *priv;
|
||||
struct spi_master *master;
|
||||
struct resource *res;
|
||||
unsigned long clk_hz;
|
||||
struct device_node *np = pdev->dev.of_node;
|
||||
int num_cs, i;
|
||||
|
@@ -368,8 +367,7 @@ static int npcm_pspi_probe(struct platform_device *pdev)
|
|||
priv->is_save_param = false;
|
||||
priv->id = pdev->id;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
priv->base = devm_ioremap_resource(&pdev->dev, res);
|
||||
priv->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(priv->base)) {
|
||||
ret = PTR_ERR(priv->base);
|
||||
goto out_master_put;
|
||||
|
@@ -388,7 +386,6 @@ static int npcm_pspi_probe(struct platform_device *pdev)
|
|||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(&pdev->dev, "failed to get IRQ\n");
|
||||
ret = irq;
|
||||
goto out_disable_clk;
|
||||
}
|
||||
|
|
|
@@ -327,7 +327,6 @@ static int nuc900_spi_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct nuc900_spi *hw;
|
||||
struct spi_master *master;
|
||||
struct resource *res;
|
||||
int err = 0;
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
|
||||
|
@@ -358,8 +357,7 @@ static int nuc900_spi_probe(struct platform_device *pdev)
|
|||
hw->bitbang.chipselect = nuc900_spi_chipsel;
|
||||
hw->bitbang.txrx_bufs = nuc900_spi_txrx;
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
hw->regs = devm_ioremap_resource(&pdev->dev, res);
|
||||
hw->regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(hw->regs)) {
|
||||
err = PTR_ERR(hw->regs);
|
||||
goto err_pdata;
|
||||
|
@@ -367,7 +365,6 @@ static int nuc900_spi_probe(struct platform_device *pdev)
|
|||
|
||||
hw->irq = platform_get_irq(pdev, 0);
|
||||
if (hw->irq < 0) {
|
||||
dev_err(&pdev->dev, "No IRQ specified\n");
|
||||
err = -ENOENT;
|
||||
goto err_pdata;
|
||||
}
|
||||
|
|
|
@@ -1007,10 +1007,8 @@ static int nxp_fspi_probe(struct platform_device *pdev)
|
|||
|
||||
/* find the irq */
|
||||
ret = platform_get_irq(pdev, 0);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "failed to get the irq: %d\n", ret);
|
||||
if (ret < 0)
|
||||
goto err_disable_clk;
|
||||
}
|
||||
|
||||
ret = devm_request_irq(dev, ret,
|
||||
nxp_fspi_irq_handler, 0, pdev->name, f);
|
||||
|
|
|
@@ -240,7 +240,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
|
|||
struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
|
||||
struct tiny_spi *hw;
|
||||
struct spi_master *master;
|
||||
struct resource *res;
|
||||
unsigned int i;
|
||||
int err = -ENODEV;
|
||||
|
||||
|
@@ -264,8 +263,7 @@ static int tiny_spi_probe(struct platform_device *pdev)
|
|||
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
|
||||
|
||||
/* find and map our resources */
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
hw->base = devm_ioremap_resource(&pdev->dev, res);
|
||||
hw->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(hw->base)) {
|
||||
err = PTR_ERR(hw->base);
|
||||
goto exit;
|
||||
|
|
|
@@ -570,7 +570,6 @@ static int pic32_sqi_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct spi_master *master;
|
||||
struct pic32_sqi *sqi;
|
||||
struct resource *reg;
|
||||
int ret;
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(*sqi));
|
||||
|
@@ -580,8 +579,7 @@ static int pic32_sqi_probe(struct platform_device *pdev)
|
|||
sqi = spi_master_get_devdata(master);
|
||||
sqi->master = master;
|
||||
|
||||
reg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
sqi->regs = devm_ioremap_resource(&pdev->dev, reg);
|
||||
sqi->regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(sqi->regs)) {
|
||||
ret = PTR_ERR(sqi->regs);
|
||||
goto err_free_master;
|
||||
|
@@ -590,7 +588,6 @@ static int pic32_sqi_probe(struct platform_device *pdev)
|
|||
/* irq */
|
||||
sqi->irq = platform_get_irq(pdev, 0);
|
||||
if (sqi->irq < 0) {
|
||||
dev_err(&pdev->dev, "no irq found\n");
|
||||
ret = sqi->irq;
|
||||
goto err_free_master;
|
||||
}
|
||||
|
|
|
@@ -711,22 +711,16 @@ static int pic32_spi_hw_probe(struct platform_device *pdev,
|
|||
|
||||
/* get irq resources: err-irq, rx-irq, tx-irq */
|
||||
pic32s->fault_irq = platform_get_irq_byname(pdev, "fault");
|
||||
if (pic32s->fault_irq < 0) {
|
||||
dev_err(&pdev->dev, "fault-irq not found\n");
|
||||
if (pic32s->fault_irq < 0)
|
||||
return pic32s->fault_irq;
|
||||
}
|
||||
|
||||
pic32s->rx_irq = platform_get_irq_byname(pdev, "rx");
|
||||
if (pic32s->rx_irq < 0) {
|
||||
dev_err(&pdev->dev, "rx-irq not found\n");
|
||||
if (pic32s->rx_irq < 0)
|
||||
return pic32s->rx_irq;
|
||||
}
|
||||
|
||||
pic32s->tx_irq = platform_get_irq_byname(pdev, "tx");
|
||||
if (pic32s->tx_irq < 0) {
|
||||
dev_err(&pdev->dev, "tx-irq not found\n");
|
||||
if (pic32s->tx_irq < 0)
|
||||
return pic32s->tx_irq;
|
||||
}
|
||||
|
||||
/* get clock */
|
||||
pic32s->clk = devm_clk_get(&pdev->dev, "mck0");
|
||||
|
|
|
@@ -424,7 +424,6 @@ static int qcom_qspi_probe(struct platform_device *pdev)
|
|||
{
|
||||
int ret;
|
||||
struct device *dev;
|
||||
struct resource *res;
|
||||
struct spi_master *master;
|
||||
struct qcom_qspi *ctrl;
|
||||
|
||||
|
@@ -440,8 +439,7 @@ static int qcom_qspi_probe(struct platform_device *pdev)
|
|||
|
||||
spin_lock_init(&ctrl->lock);
|
||||
ctrl->dev = dev;
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
ctrl->base = devm_ioremap_resource(dev, res);
|
||||
ctrl->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(ctrl->base)) {
|
||||
ret = PTR_ERR(ctrl->base);
|
||||
goto exit_probe_master_put;
|
||||
|
@@ -454,10 +452,8 @@ static int qcom_qspi_probe(struct platform_device *pdev)
|
|||
goto exit_probe_master_put;
|
||||
|
||||
ret = platform_get_irq(pdev, 0);
|
||||
if (ret < 0) {
|
||||
dev_err(dev, "Failed to get irq %d\n", ret);
|
||||
if (ret < 0)
|
||||
goto exit_probe_master_put;
|
||||
}
|
||||
ret = devm_request_irq(dev, ret, qcom_qspi_irq,
|
||||
IRQF_TRIGGER_HIGH, dev_name(dev), ctrl);
|
||||
if (ret) {
|
||||
|
|
|
@@ -135,12 +135,10 @@ static int rb4xx_spi_probe(struct platform_device *pdev)
|
|||
struct spi_master *master;
|
||||
struct clk *ahb_clk;
|
||||
struct rb4xx_spi *rbspi;
|
||||
struct resource *r;
|
||||
int err;
|
||||
void __iomem *spi_base;
|
||||
|
||||
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
spi_base = devm_ioremap_resource(&pdev->dev, r);
|
||||
spi_base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(spi_base))
|
||||
return PTR_ERR(spi_base);
|
||||
|
||||
|
|
|
@@ -487,7 +487,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
|
|||
struct s3c2410_spi_info *pdata;
|
||||
struct s3c24xx_spi *hw;
|
||||
struct spi_master *master;
|
||||
struct resource *res;
|
||||
int err = 0;
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
|
||||
|
@@ -536,8 +535,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
|
|||
dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
|
||||
|
||||
/* find and map our resources */
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
hw->regs = devm_ioremap_resource(&pdev->dev, res);
|
||||
hw->regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(hw->regs)) {
|
||||
err = PTR_ERR(hw->regs);
|
||||
goto err_no_pdata;
|
||||
|
@@ -545,7 +543,6 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
|
|||
|
||||
hw->irq = platform_get_irq(pdev, 0);
|
||||
if (hw->irq < 0) {
|
||||
dev_err(&pdev->dev, "No IRQ specified\n");
|
||||
err = -ENOENT;
|
||||
goto err_no_pdata;
|
||||
}
|
||||
|
|
|
@@ -1297,7 +1297,6 @@ static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
|
|||
|
||||
static int sh_msiof_spi_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct resource *r;
|
||||
struct spi_controller *ctlr;
|
||||
const struct sh_msiof_chipdata *chipdata;
|
||||
struct sh_msiof_spi_info *info;
|
||||
|
@@ -1346,13 +1345,11 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
|
|||
|
||||
i = platform_get_irq(pdev, 0);
|
||||
if (i < 0) {
|
||||
dev_err(&pdev->dev, "cannot get IRQ\n");
|
||||
ret = i;
|
||||
goto err1;
|
||||
}
|
||||
|
||||
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
p->mapbase = devm_ioremap_resource(&pdev->dev, r);
|
||||
p->mapbase = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(p->mapbase)) {
|
||||
ret = PTR_ERR(p->mapbase);
|
||||
goto err1;
|
||||
|
|
|
@@ -437,10 +437,8 @@ static int spi_sh_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(&pdev->dev, "platform_get_irq error: %d\n", irq);
|
||||
if (irq < 0)
|
||||
return irq;
|
||||
}
|
||||
|
||||
master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
|
||||
if (master == NULL) {
|
||||
|
|
|
@@ -292,7 +292,6 @@ sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
|
|||
static int sifive_spi_probe(struct platform_device *pdev)
|
||||
{
|
||||
struct sifive_spi *spi;
|
||||
struct resource *res;
|
||||
int ret, irq, num_cs;
|
||||
u32 cs_bits, max_bits_per_word;
|
||||
struct spi_master *master;
|
||||
|
@@ -307,8 +306,7 @@ static int sifive_spi_probe(struct platform_device *pdev)
|
|||
init_completion(&spi->done);
|
||||
platform_set_drvdata(pdev, master);
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
spi->regs = devm_ioremap_resource(&pdev->dev, res);
|
||||
spi->regs = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(spi->regs)) {
|
||||
ret = PTR_ERR(spi->regs);
|
||||
goto put_master;
|
||||
|
@@ -323,7 +321,6 @@ static int sifive_spi_probe(struct platform_device *pdev)
|
|||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(&pdev->dev, "Unable to find interrupt\n");
|
||||
ret = irq;
|
||||
goto put_master;
|
||||
}
|
||||
|
|
|
@@ -1070,7 +1070,6 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
|
|||
{
|
||||
struct sirfsoc_spi *sspi;
|
||||
struct spi_master *master;
|
||||
struct resource *mem_res;
|
||||
const struct sirf_spi_comp_data *spi_comp_data;
|
||||
int irq;
|
||||
int ret;
|
||||
|
@@ -1097,8 +1096,7 @@ static int spi_sirfsoc_probe(struct platform_device *pdev)
|
|||
sspi->fifo_level_chk_mask = (sspi->fifo_size / 4) - 1;
|
||||
sspi->dat_max_frm_len = spi_comp_data->dat_max_frm_len;
|
||||
sspi->fifo_size = spi_comp_data->fifo_size;
|
||||
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
|
||||
sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
|
||||
sspi->base = devm_platform_ioremap_resource(pdev, 0);
|
||||
if (IS_ERR(sspi->base)) {
|
||||
ret = PTR_ERR(sspi->base);
|
||||
goto free_master;
|
||||
|
|
|
@@ -410,7 +410,6 @@ static int mtk_spi_slave_probe(struct platform_device *pdev)
|
|||
|
||||
irq = platform_get_irq(pdev, 0);
|
||||
if (irq < 0) {
|
||||
dev_err(&pdev->dev, "failed to get irq (%d)\n", irq);
|
||||
ret = irq;
|
||||
goto err_put_ctlr;
|
||||
}
|
||||
|
|
|
@@ -86,6 +86,7 @@
|
|||
#define BIT_WDG_EN BIT(2)
|
||||
|
||||
/* Definition of PMIC reset status register */
|
||||
#define HWRST_STATUS_SECURITY 0x02
|
||||
#define HWRST_STATUS_RECOVERY 0x20
|
||||
#define HWRST_STATUS_NORMAL 0x40
|
||||
#define HWRST_STATUS_ALARM 0x50
|
||||
|
@@ -97,6 +98,8 @@
|
|||
#define HWRST_STATUS_AUTODLOADER 0xa0
|
||||
#define HWRST_STATUS_IQMODE 0xb0
|
||||
#define HWRST_STATUS_SPRDISK 0xc0
|
||||
#define HWRST_STATUS_FACTORYTEST 0xe0
|
||||
#define HWRST_STATUS_WATCHDOG 0xf0
|
||||
|
||||
/* Use default timeout 50 ms that converts to watchdog values */
|
||||
#define WDG_LOAD_VAL ((50 * 1000) / 32768)
|
||||
|
@@ -162,14 +165,16 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
|
|||
int read_timeout = ADI_READ_TIMEOUT;
|
||||
unsigned long flags;
|
||||
u32 val, rd_addr;
|
||||
int ret;
|
||||
int ret = 0;
|
||||
|
||||
ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
|
||||
ADI_HWSPINLOCK_TIMEOUT,
|
||||
&flags);
|
||||
if (ret) {
|
||||
dev_err(sadi->dev, "get the hw lock failed\n");
|
||||
return ret;
|
||||
if (sadi->hwlock) {
|
||||
ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
|
||||
ADI_HWSPINLOCK_TIMEOUT,
|
||||
&flags);
|
||||
if (ret) {
|
||||
dev_err(sadi->dev, "get the hw lock failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
@@ -216,7 +221,8 @@ static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val)
|
|||
*read_val = val & RD_VALUE_MASK;
|
||||
|
||||
out:
|
||||
hwspin_unlock_irqrestore(sadi->hwlock, &flags);
|
||||
if (sadi->hwlock)
|
||||
hwspin_unlock_irqrestore(sadi->hwlock, &flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -227,12 +233,14 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
|
|||
unsigned long flags;
|
||||
int ret;
|
||||
|
||||
ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
|
||||
ADI_HWSPINLOCK_TIMEOUT,
|
||||
&flags);
|
||||
if (ret) {
|
||||
dev_err(sadi->dev, "get the hw lock failed\n");
|
||||
return ret;
|
||||
if (sadi->hwlock) {
|
||||
ret = hwspin_lock_timeout_irqsave(sadi->hwlock,
|
||||
ADI_HWSPINLOCK_TIMEOUT,
|
||||
&flags);
|
||||
if (ret) {
|
||||
dev_err(sadi->dev, "get the hw lock failed\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
ret = sprd_adi_drain_fifo(sadi);
|
||||
|
@@ -258,7 +266,8 @@ static int sprd_adi_write(struct sprd_adi *sadi, u32 reg_paddr, u32 val)
|
|||
}
|
||||
|
||||
out:
|
||||
hwspin_unlock_irqrestore(sadi->hwlock, &flags);
|
||||
if (sadi->hwlock)
|
||||
hwspin_unlock_irqrestore(sadi->hwlock, &flags);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@@ -307,6 +316,18 @@ static int sprd_adi_transfer_one(struct spi_controller *ctlr,
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void sprd_adi_set_wdt_rst_mode(struct sprd_adi *sadi)
|
||||
{
|
||||
#ifdef CONFIG_SPRD_WATCHDOG
|
||||
u32 val;
|
||||
|
||||
/* Set default watchdog reboot mode */
|
||||
sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
|
||||
val |= HWRST_STATUS_WATCHDOG;
|
||||
sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
|
||||
#endif
|
||||
}
|
||||
|
||||
static int sprd_adi_restart_handler(struct notifier_block *this,
|
||||
unsigned long mode, void *cmd)
|
||||
{
|
||||
|
@@ -336,11 +357,16 @@ static int sprd_adi_restart_handler(struct notifier_block *this,
|
|||
reboot_mode = HWRST_STATUS_IQMODE;
|
||||
else if (!strncmp(cmd, "sprdisk", 7))
|
||||
reboot_mode = HWRST_STATUS_SPRDISK;
|
||||
else if (!strncmp(cmd, "tospanic", 8))
|
||||
reboot_mode = HWRST_STATUS_SECURITY;
|
||||
else if (!strncmp(cmd, "factorytest", 11))
|
||||
reboot_mode = HWRST_STATUS_FACTORYTEST;
|
||||
else
|
||||
reboot_mode = HWRST_STATUS_NORMAL;
|
||||
|
||||
/* Record the reboot mode */
|
||||
sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
|
||||
val &= ~HWRST_STATUS_WATCHDOG;
|
||||
val |= reboot_mode;
|
||||
sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);
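
To make the mapping above concrete (a worked example derived from the hunk, not code added by the patch): a userspace "reboot factorytest" hands the string "factorytest" to the handler, which records the mode in the PMIC reset status register roughly like this:

/* cmd == "factorytest", so reboot_mode = HWRST_STATUS_FACTORYTEST (0xe0) */
sprd_adi_read(sadi, sadi->slave_pbase + PMIC_RST_STATUS, &val);
val &= ~HWRST_STATUS_WATCHDOG;      /* clear the previous 0xf0 mode field */
val |= HWRST_STATUS_FACTORYTEST;    /* record 0xe0 as the reboot reason */
sprd_adi_write(sadi, sadi->slave_pbase + PMIC_RST_STATUS, val);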

@@ -380,9 +406,6 @@ static void sprd_adi_hw_init(struct sprd_adi *sadi)
const __be32 *list;
u32 tmp;

/* Address bits select default 12 bits */
writel_relaxed(0, sadi->base + REG_ADI_CTRL0);

/* Set all channels as default priority */
writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL);
writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH);

@@ -459,19 +482,30 @@ static int sprd_adi_probe(struct platform_device *pdev)
sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET;
sadi->ctlr = ctlr;
sadi->dev = &pdev->dev;
ret = of_hwspin_lock_get_id_byname(np, "adi");
if (ret < 0) {
dev_err(&pdev->dev, "can not get the hardware spinlock\n");
goto put_ctlr;
}

sadi->hwlock = devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!sadi->hwlock) {
ret = -ENXIO;
goto put_ctlr;
ret = of_hwspin_lock_get_id(np, 0);
if (ret > 0 || (IS_ENABLED(CONFIG_HWSPINLOCK) && ret == 0)) {
sadi->hwlock =
devm_hwspin_lock_request_specific(&pdev->dev, ret);
if (!sadi->hwlock) {
ret = -ENXIO;
goto put_ctlr;
}
} else {
switch (ret) {
case -ENOENT:
dev_info(&pdev->dev, "no hardware spinlock supplied\n");
break;
default:
dev_err(&pdev->dev,
"failed to find hwlock id, %d\n", ret);
/* fall-through */
case -EPROBE_DEFER:
goto put_ctlr;
}
}

sprd_adi_hw_init(sadi);
sprd_adi_set_wdt_rst_mode(sadi);

ctlr->dev.of_node = pdev->dev.of_node;
ctlr->bus_num = pdev->id;

@@ -843,10 +843,8 @@ static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
int ret;

ss->irq = platform_get_irq(pdev, 0);
if (ss->irq < 0) {
dev_err(&pdev->dev, "failed to get irq resource\n");
if (ss->irq < 0)
return ss->irq;
}

ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
0, pdev->name, ss);

@@ -298,7 +298,6 @@ static int spi_st_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct spi_master *master;
struct resource *res;
struct spi_st *spi_st;
int irq, ret = 0;
u32 var;

@@ -331,8 +330,7 @@ static int spi_st_probe(struct platform_device *pdev)
init_completion(&spi_st->done);

/* Get resources */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi_st->base = devm_ioremap_resource(&pdev->dev, res);
spi_st->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(spi_st->base)) {
ret = PTR_ERR(spi_st->base);
goto clk_disable;

@@ -570,11 +570,8 @@ static int stm32_qspi_probe(struct platform_device *pdev)
}

irq = platform_get_irq(pdev, 0);
if (irq < 0) {
if (irq != -EPROBE_DEFER)
dev_err(dev, "IRQ error missing or invalid\n");
if (irq < 0)
return irq;
}

ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0,
dev_name(dev), qspi);

@@ -428,7 +428,6 @@ static int sun4i_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct sun4i_spi *sspi;
struct resource *res;
int ret = 0, irq;

master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));

@@ -440,8 +439,7 @@ static int sun4i_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
goto err_free_master;

@@ -449,7 +447,6 @@ static int sun4i_spi_probe(struct platform_device *pdev)

irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "No spi IRQ specified\n");
ret = -ENXIO;
goto err_free_master;
}

@@ -435,7 +435,6 @@ static int sun6i_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct sun6i_spi *sspi;
struct resource *res;
int ret = 0, irq;

master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));

@@ -447,8 +446,7 @@ static int sun6i_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
sspi = spi_master_get_devdata(master);

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
sspi->base_addr = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(sspi->base_addr)) {
ret = PTR_ERR(sspi->base_addr);
goto err_free_master;

@@ -456,7 +454,6 @@ static int sun6i_spi_probe(struct platform_device *pdev)

irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "No spi IRQ specified\n");
ret = -ENXIO;
goto err_free_master;
}

@@ -670,7 +670,6 @@ static int synquacer_spi_probe(struct platform_device *pdev)

rx_irq = platform_get_irq(pdev, 0);
if (rx_irq <= 0) {
dev_err(&pdev->dev, "get rx_irq failed (%d)\n", rx_irq);
ret = rx_irq;
goto put_spi;
}

@@ -685,7 +684,6 @@ static int synquacer_spi_probe(struct platform_device *pdev)

tx_irq = platform_get_irq(pdev, 1);
if (tx_irq <= 0) {
dev_err(&pdev->dev, "get tx_irq failed (%d)\n", tx_irq);
ret = tx_irq;
goto put_spi;
}

@@ -419,7 +419,6 @@ static int tegra_sflash_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct tegra_sflash_data *tsd;
struct resource *r;
int ret;
const struct of_device_id *match;

@@ -451,8 +450,7 @@ static int tegra_sflash_probe(struct platform_device *pdev)
&master->max_speed_hz))
master->max_speed_hz = 25000000; /* 25MHz */

r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tsd->base = devm_ioremap_resource(&pdev->dev, r);
tsd->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(tsd->base)) {
ret = PTR_ERR(tsd->base);
goto exit_free_master;

@@ -717,7 +717,6 @@ static int ti_qspi_probe(struct platform_device *pdev)

irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no irq resource?\n");
ret = irq;
goto free_master;
}

@@ -7,6 +7,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>

@@ -16,6 +17,7 @@
#include <asm/unaligned.h>

#define SSI_TIMEOUT_MS 2000
#define SSI_POLL_TIMEOUT_US 200
#define SSI_MAX_CLK_DIVIDER 254
#define SSI_MIN_CLK_DIVIDER 4

@@ -227,8 +229,7 @@ static void uniphier_spi_setup_transfer(struct spi_device *spi,
priv->speed_hz = t->speed_hz;
}

if (!priv->is_save_param)
priv->is_save_param = true;
priv->is_save_param = true;

/* reset FIFOs */
val = SSI_FC_TXFFL | SSI_FC_RXFFL;

@@ -291,21 +292,23 @@ static void uniphier_spi_recv(struct uniphier_spi_priv *priv)

static void uniphier_spi_fill_tx_fifo(struct uniphier_spi_priv *priv)
{
unsigned int tx_count;
unsigned int fifo_threshold, fill_bytes;
u32 val;

tx_count = DIV_ROUND_UP(priv->tx_bytes,
fifo_threshold = DIV_ROUND_UP(priv->rx_bytes,
bytes_per_word(priv->bits_per_word));
tx_count = min(tx_count, SSI_FIFO_DEPTH);
fifo_threshold = min(fifo_threshold, SSI_FIFO_DEPTH);

fill_bytes = fifo_threshold - (priv->rx_bytes - priv->tx_bytes);

/* set fifo threshold */
val = readl(priv->base + SSI_FC);
val &= ~(SSI_FC_TXFTH_MASK | SSI_FC_RXFTH_MASK);
val |= FIELD_PREP(SSI_FC_TXFTH_MASK, tx_count);
val |= FIELD_PREP(SSI_FC_RXFTH_MASK, tx_count);
val |= FIELD_PREP(SSI_FC_TXFTH_MASK, fifo_threshold);
val |= FIELD_PREP(SSI_FC_RXFTH_MASK, fifo_threshold);
writel(val, priv->base + SSI_FC);

while (tx_count--)
while (fill_bytes--)
uniphier_spi_send(priv);
}

@@ -324,20 +327,14 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable)
writel(val, priv->base + SSI_FPS);
}

static int uniphier_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
static int uniphier_spi_transfer_one_irq(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
struct device *dev = master->dev.parent;
unsigned long time_left;

/* Terminate and return success for 0 byte length transfer */
if (!t->len)
return 0;

uniphier_spi_setup_transfer(spi, t);

reinit_completion(&priv->xfer_done);

uniphier_spi_fill_tx_fifo(priv);

@@ -357,6 +354,59 @@ static int uniphier_spi_transfer_one(struct spi_master *master,
return priv->error;
}

static int uniphier_spi_transfer_one_poll(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
int loop = SSI_POLL_TIMEOUT_US * 10;

while (priv->tx_bytes) {
uniphier_spi_fill_tx_fifo(priv);

while ((priv->rx_bytes - priv->tx_bytes) > 0) {
while (!(readl(priv->base + SSI_SR) & SSI_SR_RNE)
&& loop--)
ndelay(100);

if (loop == -1)
goto irq_transfer;

uniphier_spi_recv(priv);
}
}

return 0;

irq_transfer:
return uniphier_spi_transfer_one_irq(master, spi, t);
}

static int uniphier_spi_transfer_one(struct spi_master *master,
struct spi_device *spi,
struct spi_transfer *t)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);
unsigned long threshold;

/* Terminate and return success for 0 byte length transfer */
if (!t->len)
return 0;

uniphier_spi_setup_transfer(spi, t);

/*
* If the transfer operation will take longer than
* SSI_POLL_TIMEOUT_US, it should use irq.
*/
threshold = DIV_ROUND_UP(SSI_POLL_TIMEOUT_US * priv->speed_hz,
USEC_PER_SEC * BITS_PER_BYTE);
if (t->len > threshold)
return uniphier_spi_transfer_one_irq(master, spi, t);
else
return uniphier_spi_transfer_one_poll(master, spi, t);
}
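
As a quick sanity check on the poll/irq split above (a worked example with an assumed clock, not taken from the patch): with SSI_POLL_TIMEOUT_US = 200 and a bus clock of 10 MHz, the byte threshold evaluates to 250, so transfers of up to 250 bytes stay on the polling path and longer ones fall back to the irq handler.

/* Assumed example: priv->speed_hz = 10000000 (10 MHz) */
threshold = DIV_ROUND_UP(200 * 10000000, 1000000 * 8);
/* = DIV_ROUND_UP(2000000000, 8000000) = 250 bytes, i.e. 200 us on the wire */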

static int uniphier_spi_prepare_transfer_hardware(struct spi_master *master)
{
struct uniphier_spi_priv *priv = spi_master_get_devdata(master);

@@ -420,7 +470,6 @@ static int uniphier_spi_probe(struct platform_device *pdev)
{
struct uniphier_spi_priv *priv;
struct spi_master *master;
struct resource *res;
unsigned long clk_rate;
int irq;
int ret;

@@ -435,8 +484,7 @@ static int uniphier_spi_probe(struct platform_device *pdev)
priv->master = master;
priv->is_save_param = false;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
priv->base = devm_ioremap_resource(&pdev->dev, res);
priv->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->base)) {
ret = PTR_ERR(priv->base);
goto out_master_put;

@@ -455,7 +503,6 @@ static int uniphier_spi_probe(struct platform_device *pdev)

irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "failed to get IRQ\n");
ret = irq;
goto out_disable_clk;
}

@@ -370,7 +370,6 @@ static int xlp_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
struct xlp_spi_priv *xspi;
struct resource *res;
struct clk *clk;
int irq, err;

@@ -378,16 +377,13 @@ static int xlp_spi_probe(struct platform_device *pdev)
if (!xspi)
return -ENOMEM;

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xspi->base = devm_ioremap_resource(&pdev->dev, res);
xspi->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xspi->base))
return PTR_ERR(xspi->base);

irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no IRQ resource found: %d\n", irq);
if (irq < 0)
return irq;
}
err = devm_request_irq(&pdev->dev, irq, xlp_spi_interrupt, 0,
pdev->name, xspi);
if (err) {

@@ -620,7 +620,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct zynq_qspi *xqspi;
struct resource *res;
u32 num_cs;

ctlr = spi_alloc_master(&pdev->dev, sizeof(*xqspi));

@@ -630,8 +629,7 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi = spi_controller_get_devdata(ctlr);
xqspi->dev = dev;
platform_set_drvdata(pdev, xqspi);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;

@@ -671,7 +669,6 @@ static int zynq_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
dev_err(&pdev->dev, "irq resource not found\n");
goto remove_master;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynq_qspi_irq,

@@ -1016,7 +1016,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
int ret = 0;
struct spi_master *master;
struct zynqmp_qspi *xqspi;
struct resource *res;
struct device *dev = &pdev->dev;

eemi_ops = zynqmp_pm_get_eemi_ops();

@@ -1031,8 +1030,7 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
master->dev.of_node = pdev->dev.of_node;
platform_set_drvdata(pdev, master);

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
xqspi->regs = devm_ioremap_resource(&pdev->dev, res);
xqspi->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(xqspi->regs)) {
ret = PTR_ERR(xqspi->regs);
goto remove_master;

@@ -1077,7 +1075,6 @@ static int zynqmp_qspi_probe(struct platform_device *pdev)
xqspi->irq = platform_get_irq(pdev, 0);
if (xqspi->irq <= 0) {
ret = -ENXIO;
dev_err(dev, "irq resource not found\n");
goto clk_dis_all;
}
ret = devm_request_irq(&pdev->dev, xqspi->irq, zynqmp_qspi_irq,

@@ -1265,8 +1265,9 @@ EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
*/
static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
{
unsigned long flags;
struct spi_message *msg;
bool was_busy = false;
unsigned long flags;
int ret;

/* Lock queue */

@@ -1325,10 +1326,10 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
}

/* Extract head of queue */
ctlr->cur_msg =
list_first_entry(&ctlr->queue, struct spi_message, queue);
msg = list_first_entry(&ctlr->queue, struct spi_message, queue);
ctlr->cur_msg = msg;

list_del_init(&ctlr->cur_msg->queue);
list_del_init(&msg->queue);
if (ctlr->busy)
was_busy = true;
else

@@ -1361,7 +1362,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
if (ctlr->auto_runtime_pm)
pm_runtime_put(ctlr->dev.parent);

ctlr->cur_msg->status = ret;
msg->status = ret;
spi_finalize_current_message(ctlr);

mutex_unlock(&ctlr->io_mutex);

@@ -1369,28 +1370,28 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
}
}

trace_spi_message_start(ctlr->cur_msg);
trace_spi_message_start(msg);

if (ctlr->prepare_message) {
ret = ctlr->prepare_message(ctlr, ctlr->cur_msg);
ret = ctlr->prepare_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev, "failed to prepare message: %d\n",
ret);
ctlr->cur_msg->status = ret;
msg->status = ret;
spi_finalize_current_message(ctlr);
goto out;
}
ctlr->cur_msg_prepared = true;
}

ret = spi_map_msg(ctlr, ctlr->cur_msg);
ret = spi_map_msg(ctlr, msg);
if (ret) {
ctlr->cur_msg->status = ret;
msg->status = ret;
spi_finalize_current_message(ctlr);
goto out;
}

ret = ctlr->transfer_one_message(ctlr, ctlr->cur_msg);
ret = ctlr->transfer_one_message(ctlr, msg);
if (ret) {
dev_err(&ctlr->dev,
"failed to transfer one message from queue\n");

@@ -1434,7 +1435,7 @@ static void spi_pump_messages(struct kthread_work *work)
*/
static void spi_set_thread_rt(struct spi_controller *ctlr)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
struct sched_param param = { .sched_priority = MAX_RT_PRIO / 2 };

dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");

@@ -2105,8 +2106,8 @@ static int match_true(struct device *dev, void *data)
return 1;
}

static ssize_t spi_slave_show(struct device *dev,
struct device_attribute *attr, char *buf)
static ssize_t slave_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);

@@ -2117,9 +2118,8 @@ static ssize_t spi_slave_show(struct device *dev,
child ? to_spi_device(child)->modalias : NULL);
}

static ssize_t spi_slave_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
static ssize_t slave_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct spi_controller *ctlr = container_of(dev, struct spi_controller,
dev);

@@ -2157,7 +2157,7 @@ static ssize_t spi_slave_store(struct device *dev,
return count;
}

static DEVICE_ATTR(slave, 0644, spi_slave_show, spi_slave_store);
static DEVICE_ATTR_RW(slave);

static struct attribute *spi_slave_attrs[] = {
&dev_attr_slave.attr,
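
A note on the sysfs change above (background on the core macro, not part of the diff): DEVICE_ATTR_RW(slave) declares dev_attr_slave with 0644 permissions and wires it to callbacks named slave_show and slave_store, which is why the helpers lose their spi_ prefix. It is roughly equivalent to the open-coded line it replaces:

/* DEVICE_ATTR_RW(slave) expands to approximately: */
static DEVICE_ATTR(slave, 0644, slave_show, slave_store);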

@@ -2188,8 +2188,10 @@ extern struct class spi_slave_class; /* dummy */
* __spi_alloc_controller - allocate an SPI master or slave controller
* @dev: the controller, possibly using the platform_bus
* @size: how much zeroed driver-private data to allocate; the pointer to this
* memory is in the driver_data field of the returned device,
* accessible with spi_controller_get_devdata().
* memory is in the driver_data field of the returned device, accessible
* with spi_controller_get_devdata(); the memory is cacheline aligned;
* drivers granting DMA access to portions of their private data need to
* round up @size using ALIGN(size, dma_get_cache_alignment()).
* @slave: flag indicating whether to allocate an SPI master (false) or SPI
* slave (true) controller
* Context: can sleep
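
As a hypothetical illustration of the updated kernel-doc (the foo_spi name and layout are invented for this sketch, not taken from the patch): a driver that grants DMA access to part of its private data would round @size up as described, e.g.:

/* Sketch only: driver-private data containing a DMA-able buffer */
struct foo_spi {
	u8 dma_buf[64];
	/* ... */
};

master = spi_alloc_master(&pdev->dev,
			  ALIGN(sizeof(struct foo_spi), dma_get_cache_alignment()));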

@@ -2211,11 +2213,12 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
unsigned int size, bool slave)
{
struct spi_controller *ctlr;
size_t ctlr_size = ALIGN(sizeof(*ctlr), dma_get_cache_alignment());

if (!dev)
return NULL;

ctlr = kzalloc(size + sizeof(*ctlr), GFP_KERNEL);
ctlr = kzalloc(size + ctlr_size, GFP_KERNEL);
if (!ctlr)
return NULL;

@@ -2229,14 +2232,14 @@ struct spi_controller *__spi_alloc_controller(struct device *dev,
ctlr->dev.class = &spi_master_class;
ctlr->dev.parent = dev;
pm_suspend_ignore_children(&ctlr->dev, true);
spi_controller_set_devdata(ctlr, &ctlr[1]);
spi_controller_set_devdata(ctlr, (void *)ctlr + ctlr_size);

return ctlr;
}
EXPORT_SYMBOL_GPL(__spi_alloc_controller);

#ifdef CONFIG_OF
static int of_spi_register_master(struct spi_controller *ctlr)
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
int nb, i, *cs;
struct device_node *np = ctlr->dev.of_node;

@@ -2269,7 +2272,7 @@ static int of_spi_register_master(struct spi_controller *ctlr)
return 0;
}
#else
static int of_spi_register_master(struct spi_controller *ctlr)
static int of_spi_get_gpio_numbers(struct spi_controller *ctlr)
{
return 0;
}

@@ -2456,7 +2459,7 @@ int spi_register_controller(struct spi_controller *ctlr)
ctlr->mode_bits |= SPI_CS_HIGH;
} else {
/* Legacy code path for GPIOs from DT */
status = of_spi_register_master(ctlr);
status = of_spi_get_gpio_numbers(ctlr);
if (status)
return status;
}