Merge branch 'spi-5.1' into spi-next

Mark Brown 2019-03-04 15:32:51 +00:00
commit 14dbfb417b
GPG Key ID: 24D68B725D5487D0
66 changed files with 5003 additions and 2431 deletions

View File

@ -1,14 +1,19 @@
* Atmel Quad Serial Peripheral Interface (QSPI)
Required properties:
- compatible: Should be "atmel,sama5d2-qspi".
- compatible: Should be one of the following:
- "atmel,sama5d2-qspi"
- "microchip,sam9x60-qspi"
- reg: Should contain the locations and lengths of the base registers
and the mapped memory.
- reg-names: Should contain the resource reg names:
- qspi_base: configuration register address space
- qspi_mmap: memory mapped address space
- interrupts: Should contain the interrupt for the device.
- clocks: The phandle of the clock needed by the QSPI controller.
- clocks: Should reference the peripheral clock and the QSPI system
clock if available.
- clock-names: Should contain "pclk" for the peripheral clock and "qspick"
for the system clock when available.
- #address-cells: Should be <1>.
- #size-cells: Should be <0>.
@ -19,7 +24,8 @@ spi@f0020000 {
reg = <0xf0020000 0x100>, <0xd0000000 0x8000000>;
reg-names = "qspi_base", "qspi_mmap";
interrupts = <52 IRQ_TYPE_LEVEL_HIGH 7>;
clocks = <&spi0_clk>;
clocks = <&pmc PMC_TYPE_PERIPHERAL 52>;
clock-names = "pclk";
#address-cells = <1>;
#size-cells = <0>;
pinctrl-names = "default";
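The example above covers the sama5d2. For the new "microchip,sam9x60-qspi" compatible, which also takes the QSPI system clock, a hypothetical node could look like the sketch below; the unit address, interrupt and clock specifiers are placeholders, not values taken from this patch:

spi@f0014000 {
	compatible = "microchip,sam9x60-qspi";
	reg = <0xf0014000 0x100>, <0x70000000 0x8000000>;	/* placeholder addresses */
	reg-names = "qspi_base", "qspi_mmap";
	interrupts = <35 IRQ_TYPE_LEVEL_HIGH 7>;		/* placeholder */
	clocks = <&pmc PMC_TYPE_PERIPHERAL 35>,			/* peripheral clock, placeholder */
		 <&pmc PMC_TYPE_SYSTEM 19>;			/* QSPI system clock, placeholder */
	clock-names = "pclk", "qspick";
	#address-cells = <1>;
	#size-cells = <0>;
};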

View File

@ -10,6 +10,7 @@ Required properties:
- "fsl,imx35-cspi" for SPI compatible with the one integrated on i.MX35
- "fsl,imx51-ecspi" for SPI compatible with the one integrated on i.MX51
- "fsl,imx53-ecspi" for SPI compatible with the one integrated on i.MX53 and later Soc
- "fsl,imx8mq-ecspi" for SPI compatible with the one integrated on i.MX8M
- reg : Offset and length of the register set for the device
- interrupts : Should contain CSPI/eCSPI interrupt
- clocks : Clock specifiers for both ipg and per clocks.
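As an illustration of the new "fsl,imx8mq-ecspi" string, a minimal eCSPI node for an i.MX8M board might look like the sketch below; the unit address, interrupt number and clock specifiers are placeholders rather than values taken from this patch:

ecspi1: spi@30820000 {
	compatible = "fsl,imx8mq-ecspi";
	reg = <0x30820000 0x10000>;			/* placeholder */
	interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;	/* placeholder */
	clocks = <&clk IMX8MQ_CLK_ECSPI1_ROOT>,		/* ipg clock, placeholder */
		 <&clk IMX8MQ_CLK_ECSPI1_ROOT>;		/* per clock, placeholder */
	#address-cells = <1>;
	#size-cells = <0>;
};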

View File

@ -14,15 +14,13 @@ Required properties:
- clocks : The clocks needed by the QuadSPI controller
- clock-names : Should contain the name of the clocks: "qspi_en" and "qspi".
Optional properties:
- fsl,qspi-has-second-chip: The controller has two buses, bus A and bus B.
Each bus can be connected with two NOR flashes.
Most of the time, each bus only has one NOR flash
connected, this is the default case.
But if there are two NOR flashes connected to the
bus, you should enable this property.
(Please check the board's schematic.)
- big-endian : Indicates that the controller registers are big endian
Required SPI slave node properties:
- reg: There are two buses (A and B) with two chip selects each.
This encodes to which bus and CS the flash is connected:
<0>: Bus A, CS 0
<1>: Bus A, CS 1
<2>: Bus B, CS 0
<3>: Bus B, CS 1
Example:
@ -40,7 +38,7 @@ qspi0: quadspi@40044000 {
};
};
Example showing the usage of two SPI NOR devices:
Example showing the usage of two SPI NOR devices on bus A:
&qspi2 {
pinctrl-names = "default";
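The example is cut short by the diff context; a minimal sketch of how two NOR flashes on bus A would be described (the flash compatible, frequency and pinctrl phandle are placeholders) could read:

&qspi2 {
	pinctrl-names = "default";
	pinctrl-0 = <&pinctrl_qspi2>;		/* placeholder */
	status = "okay";

	flash0: flash@0 {
		compatible = "jedec,spi-nor";	/* placeholder */
		spi-max-frequency = <29000000>;	/* placeholder */
		reg = <0>;			/* bus A, CS 0 */
	};

	flash1: flash@1 {
		compatible = "jedec,spi-nor";	/* placeholder */
		spi-max-frequency = <29000000>;	/* placeholder */
		reg = <1>;			/* bus A, CS 1 */
	};
};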

View File

@ -0,0 +1,39 @@
* NXP Flex Serial Peripheral Interface (FSPI)
Required properties:
- compatible : Should be "nxp,lx2160a-fspi"
- reg : First contains the register location and length,
Second contains the memory mapping address and length
- reg-names : Should contain the resource reg names:
- fspi_base: configuration register address space
- fspi_mmap: memory mapped address space
- interrupts : Should contain the interrupt for the device
Required SPI slave node properties:
- reg : There are two buses (A and B) with two chip selects each.
This encodes to which bus and CS the flash is connected:
- <0>: Bus A, CS 0
- <1>: Bus A, CS 1
- <2>: Bus B, CS 0
- <3>: Bus B, CS 1
Example showing the usage of two SPI NOR slave devices on bus A:
fspi0: spi@20c0000 {
compatible = "nxp,lx2160a-fspi";
reg = <0x0 0x20c0000 0x0 0x10000>, <0x0 0x20000000 0x0 0x10000000>;
reg-names = "fspi_base", "fspi_mmap";
interrupts = <0 25 0x4>; /* Level high type */
clocks = <&clockgen 4 3>, <&clockgen 4 3>;
clock-names = "fspi_en", "fspi";
mt35xu512aba0: flash@0 {
reg = <0>;
....
};
mt35xu512aba1: flash@1 {
reg = <1>;
....
};
};

View File

@ -0,0 +1,37 @@
SiFive SPI controller Device Tree Bindings
------------------------------------------
Required properties:
- compatible : Should be "sifive,<chip>-spi" and "sifive,spi<version>".
Supported compatible strings are:
"sifive,fu540-c000-spi" for the SiFive SPI v0 as integrated
onto the SiFive FU540 chip, and "sifive,spi0" for the SiFive
SPI v0 IP block with no chip integration tweaks.
Please refer to sifive-blocks-ip-versioning.txt for details
- reg : Physical base address and size of SPI registers map
A second (optional) range can indicate memory mapped flash
- interrupts : Must contain one entry
- interrupt-parent : Must be core interrupt controller
- clocks : Must reference the frequency given to the controller
- #address-cells : Must be '1', indicating which CS to use
- #size-cells : Must be '0'
Optional properties:
- sifive,fifo-depth : Depth of hardware queues; defaults to 8
- sifive,max-bits-per-word : Maximum bits per word; defaults to 8
SPI RTL that corresponds to the IP block version numbers can be found here:
https://github.com/sifive/sifive-blocks/tree/master/src/main/scala/devices/spi
Example:
spi: spi@10040000 {
compatible = "sifive,fu540-c000-spi", "sifive,spi0";
reg = <0x0 0x10040000 0x0 0x1000 0x0 0x20000000 0x0 0x10000000>;
interrupt-parent = <&plic>;
interrupts = <51>;
clocks = <&tlclk>;
#address-cells = <1>;
#size-cells = <0>;
sifive,fifo-depth = <8>;
sifive,max-bits-per-word = <8>;
};

View File

@ -14,6 +14,11 @@ Required properties:
address on the SPI bus. Should be set to 1.
- #size-cells: Should be set to 0.
Optional properties:
- dma-names: Should contain the names of the DMA channels used by the SPI controller.
- dmas: Should contain the DMA channel specifiers and DMA slave IDs used by the SPI
controller, listed in the same order as the dma-names property.
Example:
spi0: spi@70a00000{
compatible = "sprd,sc9860-spi";
@ -21,6 +26,8 @@ spi0: spi@70a00000{
interrupts = <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "spi", "source","enable";
clocks = <&clk_spi0>, <&ext_26m>, <&clk_ap_apb_gates 5>;
dma-names = "rx_chn", "tx_chn";
dmas = <&apdma 11 11>, <&apdma 12 12>;
#address-cells = <1>;
#size-cells = <0>;
};

View File

@ -7,7 +7,9 @@ from 4 to 32-bit data size. Although it can be configured as master or slave,
only master is supported by the driver.
Required properties:
- compatible: Must be "st,stm32h7-spi".
- compatible: Should be one of:
"st,stm32h7-spi"
"st,stm32f4-spi"
- reg: Offset and length of the device's register set.
- interrupts: Must contain the interrupt id.
- clocks: Must contain an entry for spiclk (which feeds the internal clock
@ -30,8 +32,9 @@ Child nodes represent devices on the SPI bus
See ../spi/spi-bus.txt
Optional properties:
- st,spi-midi-ns: (Master Inter-Data Idleness) minimum time delay in
nanoseconds inserted between two consecutive data frames.
- st,spi-midi-ns: Only for STM32H7, (Master Inter-Data Idleness) minimum time
delay in nanoseconds inserted between two consecutive data
frames.
Example:
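(The file's own example node is not shown in this hunk.) Purely as an illustration, an STM32H7 instance with a slave device using st,spi-midi-ns might look like the sketch below; the unit address, interrupt, clock phandle and slave device are placeholders:

spi2: spi@40003800 {
	compatible = "st,stm32h7-spi";
	reg = <0x40003800 0x400>;		/* placeholder */
	interrupts = <36>;			/* placeholder */
	clocks = <&spi2_clk>;			/* placeholder clock phandle */
	#address-cells = <1>;
	#size-cells = <0>;

	flash@0 {
		compatible = "jedec,spi-nor";	/* placeholder slave device */
		reg = <0>;
		spi-max-frequency = <10000000>;	/* placeholder */
		st,spi-midi-ns = <4000>;	/* STM32H7 only */
	};
};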

View File

@ -21,15 +21,15 @@ Typically a SPI master is defined in the arch/.../mach-*/board-*.c as a
"platform device". The master configuration is passed to the driver via a table
found in include/linux/spi/pxa2xx_spi.h:
struct pxa2xx_spi_master {
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
};
The "pxa2xx_spi_master.num_chipselect" field is used to determine the number of
The "pxa2xx_spi_controller.num_chipselect" field is used to determine the number of
slave devices (chips) attached to this SPI master.
The "pxa2xx_spi_master.enable_dma" field informs the driver that SSP DMA should
The "pxa2xx_spi_controller.enable_dma" field informs the driver that SSP DMA should
be used. This causes the driver to acquire two DMA channels: rx_channel and
tx_channel. The rx_channel has a higher DMA service priority than the tx_channel.
See the "PXA2xx Developer Manual" section "DMA Controller".
@ -51,7 +51,7 @@ static struct resource pxa_spi_nssp_resources[] = {
},
};
static struct pxa2xx_spi_master pxa_nssp_master_info = {
static struct pxa2xx_spi_controller pxa_nssp_master_info = {
.num_chipselect = 1, /* Matches the number of chips attached to NSSP */
.enable_dma = 1, /* Enables NSSP DMA */
};
@ -206,7 +206,7 @@ DMA and PIO I/O Support
-----------------------
The pxa2xx_spi driver supports both DMA and interrupt driven PIO message
transfers. The driver defaults to PIO mode and DMA transfers must be enabled
by setting the "enable_dma" flag in the "pxa2xx_spi_master" structure. The DMA
by setting the "enable_dma" flag in the "pxa2xx_spi_controller" structure. The DMA
mode supports both coherent and stream based DMA mappings.
The following logic is used to determine the type of I/O to be used on

View File

@ -6105,9 +6105,9 @@ F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
FREESCALE QUAD SPI DRIVER
M: Han Xu <han.xu@nxp.com>
L: linux-mtd@lists.infradead.org
L: linux-spi@vger.kernel.org
S: Maintained
F: drivers/mtd/spi-nor/fsl-quadspi.c
F: drivers/spi/spi-fsl-qspi.c
FREESCALE QUICC ENGINE LIBRARY
M: Qiang Zhao <qiang.zhao@nxp.com>
@ -10944,6 +10944,14 @@ F: lib/objagg.c
F: lib/test_objagg.c
F: include/linux/objagg.h
NXP FSPI DRIVER
R: Yogesh Gaur <yogeshgaur.83@gmail.com>
M: Ashish Kumar <ashish.kumar@nxp.com>
L: linux-spi@vger.kernel.org
S: Maintained
F: drivers/spi/spi-nxp-fspi.c
F: Documentation/devicetree/bindings/spi/spi-nxp-fspi.txt
OBJTOOL
M: Josh Poimboeuf <jpoimboe@redhat.com>
M: Peter Zijlstra <peterz@infradead.org>

View File

@ -98,7 +98,7 @@ static unsigned long cmx255_pin_config[] = {
};
#if defined(CONFIG_SPI_PXA2XX)
static struct pxa2xx_spi_master pxa_ssp_master_info = {
static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};

View File

@ -313,7 +313,7 @@ static inline void cmx270_init_mmc(void) {}
#endif
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
static struct pxa2xx_spi_master cm_x270_spi_info = {
static struct pxa2xx_spi_controller cm_x270_spi_info = {
.num_chipselect = 1,
.enable_dma = 1,
};

View File

@ -530,7 +530,7 @@ static struct pxa2xx_udc_mach_info udc_info __initdata = {
};
#if IS_ENABLED(CONFIG_SPI_PXA2XX)
static struct pxa2xx_spi_master corgi_spi_info = {
static struct pxa2xx_spi_controller corgi_spi_info = {
.num_chipselect = 3,
};

View File

@ -1065,7 +1065,7 @@ struct platform_device pxa93x_device_gpio = {
/* pxa2xx-spi platform-device ID equals respective SSP platform-device ID + 1.
* See comment in arch/arm/mach-pxa/ssp.c::ssp_probe() */
void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info)
void __init pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info)
{
struct platform_device *pd;

View File

@ -689,7 +689,7 @@ static inline void em_x270_init_lcd(void) {}
#endif
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
static struct pxa2xx_spi_master em_x270_spi_info = {
static struct pxa2xx_spi_controller em_x270_spi_info = {
.num_chipselect = 1,
};
@ -703,7 +703,7 @@ static struct tdo24m_platform_data em_x270_tdo24m_pdata = {
.model = TDO35S,
};
static struct pxa2xx_spi_master em_x270_spi_2_info = {
static struct pxa2xx_spi_controller em_x270_spi_2_info = {
.num_chipselect = 1,
.enable_dma = 1,
};

View File

@ -629,7 +629,7 @@ static struct spi_board_info tsc2046_board_info[] __initdata = {
},
};
static struct pxa2xx_spi_master pxa_ssp2_master_info = {
static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
.num_chipselect = 1,
.enable_dma = 1,
};

View File

@ -115,12 +115,12 @@ static struct spi_board_info mcp251x_board_info[] = {
}
};
static struct pxa2xx_spi_master pxa_ssp3_spi_master_info = {
static struct pxa2xx_spi_controller pxa_ssp3_spi_master_info = {
.num_chipselect = 2,
.enable_dma = 1
};
static struct pxa2xx_spi_master pxa_ssp4_spi_master_info = {
static struct pxa2xx_spi_controller pxa_ssp4_spi_master_info = {
.num_chipselect = 2,
.enable_dma = 1
};

View File

@ -191,7 +191,7 @@ static inline void littleton_init_lcd(void) {};
#endif /* CONFIG_FB_PXA || CONFIG_FB_PXA_MODULE */
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
static struct pxa2xx_spi_master littleton_spi_info = {
static struct pxa2xx_spi_controller littleton_spi_info = {
.num_chipselect = 1,
};

View File

@ -197,7 +197,7 @@ static struct platform_device sa1111_device = {
* (to J5) and poking board registers (as done below). Else it's only useful
* for the temperature sensors.
*/
static struct pxa2xx_spi_master pxa_ssp_master_info = {
static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};

View File

@ -932,7 +932,7 @@ struct pxa2xx_spi_chip tsc2046_chip_info = {
.gpio_cs = GPIO14_MAGICIAN_TSC2046_CS,
};
static struct pxa2xx_spi_master magician_spi_info = {
static struct pxa2xx_spi_controller magician_spi_info = {
.num_chipselect = 1,
.enable_dma = 1,
};

View File

@ -132,7 +132,7 @@ static struct platform_device smc91x_device = {
/*
* SPI host and devices
*/
static struct pxa2xx_spi_master pxa_ssp_master_info = {
static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};

View File

@ -196,7 +196,7 @@ struct platform_device poodle_locomo_device = {
EXPORT_SYMBOL(poodle_locomo_device);
#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
static struct pxa2xx_spi_master poodle_spi_info = {
static struct pxa2xx_spi_controller poodle_spi_info = {
.num_chipselect = 1,
};

View File

@ -572,7 +572,7 @@ static struct spi_board_info spitz_spi_devices[] = {
},
};
static struct pxa2xx_spi_master spitz_spi_info = {
static struct pxa2xx_spi_controller spitz_spi_info = {
.num_chipselect = 3,
};

View File

@ -337,15 +337,15 @@ static struct platform_device stargate2_flash_device = {
.num_resources = 1,
};
static struct pxa2xx_spi_master pxa_ssp_master_0_info = {
static struct pxa2xx_spi_controller pxa_ssp_master_0_info = {
.num_chipselect = 1,
};
static struct pxa2xx_spi_master pxa_ssp_master_1_info = {
static struct pxa2xx_spi_controller pxa_ssp_master_1_info = {
.num_chipselect = 1,
};
static struct pxa2xx_spi_master pxa_ssp_master_2_info = {
static struct pxa2xx_spi_controller pxa_ssp_master_2_info = {
.num_chipselect = 1,
};

View File

@ -813,7 +813,7 @@ static struct platform_device tosa_bt_device = {
.dev.platform_data = &tosa_bt_data,
};
static struct pxa2xx_spi_master pxa_ssp_master_info = {
static struct pxa2xx_spi_controller pxa_ssp_master_info = {
.num_chipselect = 1,
};

View File

@ -607,12 +607,12 @@ static struct spi_board_info spi_board_info[] __initdata = {
},
};
static struct pxa2xx_spi_master pxa_ssp1_master_info = {
static struct pxa2xx_spi_controller pxa_ssp1_master_info = {
.num_chipselect = 1,
.enable_dma = 1,
};
static struct pxa2xx_spi_master pxa_ssp2_master_info = {
static struct pxa2xx_spi_controller pxa_ssp2_master_info = {
.num_chipselect = 1,
};

View File

@ -391,7 +391,7 @@ static struct platform_device zeus_sram_device = {
};
/* SPI interface on SSP3 */
static struct pxa2xx_spi_master pxa2xx_spi_ssp3_master_info = {
static struct pxa2xx_spi_controller pxa2xx_spi_ssp3_master_info = {
.num_chipselect = 1,
.enable_dma = 1,
};

View File

@ -13,7 +13,7 @@
#define _ATH79_DEV_SPI_H
#include <linux/spi/spi.h>
#include <asm/mach-ath79/ath79_spi_platform.h>
#include <linux/platform_data/spi-ath79.h>
void ath79_register_spi(struct ath79_spi_platform_data *pdata,
struct spi_board_info const *info,

View File

@ -125,7 +125,7 @@ static void of_gpio_flags_quirks(struct device_node *np,
for_each_child_of_node(np, child) {
ret = of_property_read_u32(child, "reg", &cs);
if (!ret)
if (ret)
continue;
if (cs == index) {
/*

View File

@ -50,15 +50,6 @@ config SPI_CADENCE_QUADSPI
device with a Cadence QSPI controller and want to access the
Flash as an MTD device.
config SPI_FSL_QUADSPI
tristate "Freescale Quad SPI controller"
depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
depends on HAS_IOMEM
help
This enables support for the Quad SPI controller in master mode.
This controller does not support generic SPI. It only supports
SPI NOR.
config SPI_HISI_SFC
tristate "Hisilicon SPI-NOR Flash Controller(SFC)"
depends on ARCH_HISI || COMPILE_TEST

View File

@ -2,7 +2,6 @@
obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o
obj-$(CONFIG_SPI_ASPEED_SMC) += aspeed-smc.o
obj-$(CONFIG_SPI_CADENCE_QUADSPI) += cadence-quadspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o
obj-$(CONFIG_SPI_HISI_SFC) += hisi-sfc.o
obj-$(CONFIG_MTD_MT81xx_NOR) += mtk-quadspi.o
obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o

File diff suppressed because it is too large

View File

@ -63,7 +63,7 @@ config SPI_ALTERA
config SPI_ATH79
tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
depends on ATH79 && GPIOLIB
depends on ATH79 || COMPILE_TEST
select SPI_BITBANG
help
This enables support for the SPI controller present on the
@ -268,6 +268,27 @@ config SPI_FSL_LPSPI
help
This enables Freescale i.MX LPSPI controllers in master mode.
config SPI_FSL_QUADSPI
tristate "Freescale QSPI controller"
depends on ARCH_MXC || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
depends on HAS_IOMEM
help
This enables support for the Quad SPI controller in master mode.
Up to four flash chips can be connected on two buses with two
chipselects each.
This controller does not support generic SPI messages. It only
supports the high-level SPI memory interface.
config SPI_NXP_FLEXSPI
tristate "NXP Flex SPI controller"
depends on ARCH_LAYERSCAPE || HAS_IOMEM
help
This enables support for the Flex SPI controller in master mode.
Up to four slave devices can be connected on two buses with two
chipselects each.
This controller does not support generic SPI messages and only
supports the high-level SPI memory interface.
config SPI_GPIO
tristate "GPIO-based bitbanging SPI Master"
depends on GPIOLIB || COMPILE_TEST
@ -296,8 +317,7 @@ config SPI_IMX
depends on ARCH_MXC || COMPILE_TEST
select SPI_BITBANG
help
This enables using the Freescale i.MX SPI controllers in master
mode.
This enables support for the Freescale i.MX SPI controllers.
config SPI_JCORE
tristate "J-Core SPI Master"
@ -372,7 +392,7 @@ config SPI_FSL_DSPI
depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
mode. VF610 platform uses the controller.
mode. VF610, LS1021A and ColdFire platforms use the controller.
config SPI_FSL_ESPI
tristate "Freescale eSPI controller"
@ -631,6 +651,12 @@ config SPI_SH_HSPI
help
SPI driver for SuperH HSPI blocks.
config SPI_SIFIVE
tristate "SiFive SPI controller"
depends on HAS_IOMEM
help
This exposes the SPI controller IP from SiFive.
config SPI_SIRF
tristate "CSR SiRFprimaII SPI controller"
depends on SIRF_DMA
@ -665,7 +691,7 @@ config SPI_STM32
tristate "STMicroelectronics STM32 SPI controller"
depends on ARCH_STM32 || COMPILE_TEST
help
SPI driver for STMicroelectonics STM32 SoCs.
SPI driver for STMicroelectronics STM32 SoCs.
STM32 SPI controller supports DMA and PIO modes. When DMA
is not available, the driver automatically falls back to

View File

@ -45,6 +45,7 @@ obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o
obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o
obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
@ -63,6 +64,7 @@ obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
obj-$(CONFIG_SPI_MXS) += spi-mxs.o
obj-$(CONFIG_SPI_NPCM_PSPI) += spi-npcm-pspi.o
obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
obj-$(CONFIG_SPI_NXP_FLEXSPI) += spi-nxp-fspi.o
obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
spi-octeon-objs := spi-cavium.o spi-cavium-octeon.o
obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
@ -93,6 +95,7 @@ obj-$(CONFIG_SPI_SH) += spi-sh.o
obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
obj-$(CONFIG_SPI_SIFIVE) += spi-sifive.o
obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
obj-$(CONFIG_SPI_SLAVE_MT27XX) += spi-slave-mt27xx.o
obj-$(CONFIG_SPI_SPRD) += spi-sprd.o

View File

@ -1,3 +1,4 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Driver for Atmel QSPI Controller
*
@ -7,31 +8,19 @@
* Author: Cyrille Pitchen <cyrille.pitchen@atmel.com>
* Author: Piotr Bugalski <bugalski.piotr@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*
* This driver is based on drivers/mtd/spi-nor/fsl-quadspi.c from Freescale.
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/spi/spi-mem.h>
/* QSPI register offsets */
@ -47,7 +36,9 @@
#define QSPI_IAR 0x0030 /* Instruction Address Register */
#define QSPI_ICR 0x0034 /* Instruction Code Register */
#define QSPI_WICR 0x0034 /* Write Instruction Code Register */
#define QSPI_IFR 0x0038 /* Instruction Frame Register */
#define QSPI_RICR 0x003C /* Read Instruction Code Register */
#define QSPI_SMR 0x0040 /* Scrambling Mode Register */
#define QSPI_SKR 0x0044 /* Scrambling Key Register */
@ -100,7 +91,7 @@
#define QSPI_SCR_DLYBS_MASK GENMASK(23, 16)
#define QSPI_SCR_DLYBS(n) (((n) << 16) & QSPI_SCR_DLYBS_MASK)
/* Bitfields in QSPI_ICR (Instruction Code Register) */
/* Bitfields in QSPI_ICR (Read/Write Instruction Code Register) */
#define QSPI_ICR_INST_MASK GENMASK(7, 0)
#define QSPI_ICR_INST(inst) (((inst) << 0) & QSPI_ICR_INST_MASK)
#define QSPI_ICR_OPT_MASK GENMASK(23, 16)
@ -125,14 +116,12 @@
#define QSPI_IFR_OPTL_4BIT (2 << 8)
#define QSPI_IFR_OPTL_8BIT (3 << 8)
#define QSPI_IFR_ADDRL BIT(10)
#define QSPI_IFR_TFRTYP_MASK GENMASK(13, 12)
#define QSPI_IFR_TFRTYP_TRSFR_READ (0 << 12)
#define QSPI_IFR_TFRTYP_TRSFR_READ_MEM (1 << 12)
#define QSPI_IFR_TFRTYP_TRSFR_WRITE (2 << 12)
#define QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM (3 << 13)
#define QSPI_IFR_TFRTYP_MEM BIT(12)
#define QSPI_IFR_SAMA5D2_WRITE_TRSFR BIT(13)
#define QSPI_IFR_CRM BIT(14)
#define QSPI_IFR_NBDUM_MASK GENMASK(20, 16)
#define QSPI_IFR_NBDUM(n) (((n) << 16) & QSPI_IFR_NBDUM_MASK)
#define QSPI_IFR_APBTFRTYP_READ BIT(24) /* Defined in SAM9X60 */
/* Bitfields in QSPI_SMR (Scrambling Mode Register) */
#define QSPI_SMR_SCREN BIT(0)
@ -148,24 +137,31 @@
#define QSPI_WPSR_WPVSRC_MASK GENMASK(15, 8)
#define QSPI_WPSR_WPVSRC(src) (((src) << 8) & QSPI_WPSR_WPVSRC)
struct atmel_qspi_caps {
bool has_qspick;
bool has_ricr;
};
struct atmel_qspi {
void __iomem *regs;
void __iomem *mem;
struct clk *clk;
struct clk *pclk;
struct clk *qspick;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
u32 pending;
u32 mr;
struct completion cmd_completion;
};
struct qspi_mode {
struct atmel_qspi_mode {
u8 cmd_buswidth;
u8 addr_buswidth;
u8 data_buswidth;
u32 config;
};
static const struct qspi_mode sama5d2_qspi_modes[] = {
static const struct atmel_qspi_mode atmel_qspi_modes[] = {
{ 1, 1, 1, QSPI_IFR_WIDTH_SINGLE_BIT_SPI },
{ 1, 1, 2, QSPI_IFR_WIDTH_DUAL_OUTPUT },
{ 1, 1, 4, QSPI_IFR_WIDTH_QUAD_OUTPUT },
@ -175,19 +171,8 @@ static const struct qspi_mode sama5d2_qspi_modes[] = {
{ 4, 4, 4, QSPI_IFR_WIDTH_QUAD_CMD },
};
/* Register access functions */
static inline u32 qspi_readl(struct atmel_qspi *aq, u32 reg)
{
return readl_relaxed(aq->regs + reg);
}
static inline void qspi_writel(struct atmel_qspi *aq, u32 reg, u32 value)
{
writel_relaxed(value, aq->regs + reg);
}
static inline bool is_compatible(const struct spi_mem_op *op,
const struct qspi_mode *mode)
static inline bool atmel_qspi_is_compatible(const struct spi_mem_op *op,
const struct atmel_qspi_mode *mode)
{
if (op->cmd.buswidth != mode->cmd_buswidth)
return false;
@ -201,21 +186,21 @@ static inline bool is_compatible(const struct spi_mem_op *op,
return true;
}
static int find_mode(const struct spi_mem_op *op)
static int atmel_qspi_find_mode(const struct spi_mem_op *op)
{
u32 i;
for (i = 0; i < ARRAY_SIZE(sama5d2_qspi_modes); i++)
if (is_compatible(op, &sama5d2_qspi_modes[i]))
for (i = 0; i < ARRAY_SIZE(atmel_qspi_modes); i++)
if (atmel_qspi_is_compatible(op, &atmel_qspi_modes[i]))
return i;
return -1;
return -ENOTSUPP;
}
static bool atmel_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
if (find_mode(op) < 0)
if (atmel_qspi_find_mode(op) < 0)
return false;
/* special case not supported by hardware */
@ -226,29 +211,37 @@ static bool atmel_qspi_supports_op(struct spi_mem *mem,
return true;
}
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
static int atmel_qspi_set_cfg(struct atmel_qspi *aq,
const struct spi_mem_op *op, u32 *offset)
{
struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
int mode;
u32 iar, icr, ifr;
u32 dummy_cycles = 0;
u32 iar, icr, ifr, sr;
int err = 0;
int mode;
iar = 0;
icr = QSPI_ICR_INST(op->cmd.opcode);
ifr = QSPI_IFR_INSTEN;
qspi_writel(aq, QSPI_MR, QSPI_MR_SMM);
mode = find_mode(op);
mode = atmel_qspi_find_mode(op);
if (mode < 0)
return -ENOTSUPP;
ifr |= sama5d2_qspi_modes[mode].config;
return mode;
ifr |= atmel_qspi_modes[mode].config;
if (op->dummy.buswidth && op->dummy.nbytes)
dummy_cycles = op->dummy.nbytes * 8 / op->dummy.buswidth;
/*
* The controller allows 24 and 32-bit addressing while NAND-flash
* requires 16-bit addresses. Handling 8-bit addresses is done using
* the option field. For the 16-bit addresses, the workaround depends
* on the number of requested dummy bits. If there are 8 or more dummy
* cycles, the address is shifted and sent with the first dummy byte.
* Otherwise opcode is disabled and the first byte of the address
* contains the command opcode (works only if the opcode and address
* use the same buswidth). The limitation is when the 16-bit address is
* used without enough dummy cycles and the opcode is using a different
* buswidth than the address.
*/
if (op->addr.buswidth) {
switch (op->addr.nbytes) {
case 0:
@ -282,6 +275,9 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
}
}
/* offset of the data access in the QSPI memory space */
*offset = iar;
/* Set number of dummy cycles */
if (dummy_cycles)
ifr |= QSPI_IFR_NBDUM(dummy_cycles);
@ -290,49 +286,82 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
if (op->data.nbytes)
ifr |= QSPI_IFR_DATAEN;
if (op->data.dir == SPI_MEM_DATA_IN && op->data.nbytes)
ifr |= QSPI_IFR_TFRTYP_TRSFR_READ;
else
ifr |= QSPI_IFR_TFRTYP_TRSFR_WRITE;
/*
* If the QSPI controller is set in regular SPI mode, set it in
* Serial Memory Mode (SMM).
*/
if (aq->mr != QSPI_MR_SMM) {
writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
aq->mr = QSPI_MR_SMM;
}
/* Clear pending interrupts */
(void)qspi_readl(aq, QSPI_SR);
(void)readl_relaxed(aq->regs + QSPI_SR);
/* Set QSPI Instruction Frame registers */
qspi_writel(aq, QSPI_IAR, iar);
qspi_writel(aq, QSPI_ICR, icr);
qspi_writel(aq, QSPI_IFR, ifr);
if (aq->caps->has_ricr) {
if (!op->addr.nbytes && op->data.dir == SPI_MEM_DATA_IN)
ifr |= QSPI_IFR_APBTFRTYP_READ;
/* Set QSPI Instruction Frame registers */
writel_relaxed(iar, aq->regs + QSPI_IAR);
if (op->data.dir == SPI_MEM_DATA_IN)
writel_relaxed(icr, aq->regs + QSPI_RICR);
else
writel_relaxed(icr, aq->regs + QSPI_WICR);
writel_relaxed(ifr, aq->regs + QSPI_IFR);
} else {
if (op->data.dir == SPI_MEM_DATA_OUT)
ifr |= QSPI_IFR_SAMA5D2_WRITE_TRSFR;
/* Set QSPI Instruction Frame registers */
writel_relaxed(iar, aq->regs + QSPI_IAR);
writel_relaxed(icr, aq->regs + QSPI_ICR);
writel_relaxed(ifr, aq->regs + QSPI_IFR);
}
return 0;
}
static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct atmel_qspi *aq = spi_controller_get_devdata(mem->spi->master);
u32 sr, offset;
int err;
err = atmel_qspi_set_cfg(aq, op, &offset);
if (err)
return err;
/* Skip to the final steps if there is no data */
if (op->data.nbytes) {
/* Dummy read of QSPI_IFR to synchronize APB and AHB accesses */
(void)qspi_readl(aq, QSPI_IFR);
(void)readl_relaxed(aq->regs + QSPI_IFR);
/* Send/Receive data */
if (op->data.dir == SPI_MEM_DATA_IN)
_memcpy_fromio(op->data.buf.in,
aq->mem + iar, op->data.nbytes);
_memcpy_fromio(op->data.buf.in, aq->mem + offset,
op->data.nbytes);
else
_memcpy_toio(aq->mem + iar,
op->data.buf.out, op->data.nbytes);
_memcpy_toio(aq->mem + offset, op->data.buf.out,
op->data.nbytes);
/* Release the chip-select */
qspi_writel(aq, QSPI_CR, QSPI_CR_LASTXFER);
writel_relaxed(QSPI_CR_LASTXFER, aq->regs + QSPI_CR);
}
/* Poll INSTRuction End status */
sr = qspi_readl(aq, QSPI_SR);
sr = readl_relaxed(aq->regs + QSPI_SR);
if ((sr & QSPI_SR_CMD_COMPLETED) == QSPI_SR_CMD_COMPLETED)
return err;
/* Wait for INSTRuction End interrupt */
reinit_completion(&aq->cmd_completion);
aq->pending = sr & QSPI_SR_CMD_COMPLETED;
qspi_writel(aq, QSPI_IER, QSPI_SR_CMD_COMPLETED);
writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IER);
if (!wait_for_completion_timeout(&aq->cmd_completion,
msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
qspi_writel(aq, QSPI_IDR, QSPI_SR_CMD_COMPLETED);
writel_relaxed(QSPI_SR_CMD_COMPLETED, aq->regs + QSPI_IDR);
return err;
}
@ -361,7 +390,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
if (!spi->max_speed_hz)
return -EINVAL;
src_rate = clk_get_rate(aq->clk);
src_rate = clk_get_rate(aq->pclk);
if (!src_rate)
return -EINVAL;
@ -371,7 +400,7 @@ static int atmel_qspi_setup(struct spi_device *spi)
scbr--;
scr = QSPI_SCR_SCBR(scbr);
qspi_writel(aq, QSPI_SCR, scr);
writel_relaxed(scr, aq->regs + QSPI_SCR);
return 0;
}
@ -379,21 +408,25 @@ static int atmel_qspi_setup(struct spi_device *spi)
static int atmel_qspi_init(struct atmel_qspi *aq)
{
/* Reset the QSPI controller */
qspi_writel(aq, QSPI_CR, QSPI_CR_SWRST);
writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR);
/* Set the QSPI controller by default in Serial Memory Mode */
writel_relaxed(QSPI_MR_SMM, aq->regs + QSPI_MR);
aq->mr = QSPI_MR_SMM;
/* Enable the QSPI controller */
qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIEN);
writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR);
return 0;
}
static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id)
{
struct atmel_qspi *aq = (struct atmel_qspi *)dev_id;
struct atmel_qspi *aq = dev_id;
u32 status, mask, pending;
status = qspi_readl(aq, QSPI_SR);
mask = qspi_readl(aq, QSPI_IMR);
status = readl_relaxed(aq->regs + QSPI_SR);
mask = readl_relaxed(aq->regs + QSPI_IMR);
pending = status & mask;
if (!pending)
@ -449,44 +482,74 @@ static int atmel_qspi_probe(struct platform_device *pdev)
}
/* Get the peripheral clock */
aq->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(aq->clk)) {
aq->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(aq->pclk))
aq->pclk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(aq->pclk)) {
dev_err(&pdev->dev, "missing peripheral clock\n");
err = PTR_ERR(aq->clk);
err = PTR_ERR(aq->pclk);
goto exit;
}
/* Enable the peripheral clock */
err = clk_prepare_enable(aq->clk);
err = clk_prepare_enable(aq->pclk);
if (err) {
dev_err(&pdev->dev, "failed to enable the peripheral clock\n");
goto exit;
}
aq->caps = of_device_get_match_data(&pdev->dev);
if (!aq->caps) {
dev_err(&pdev->dev, "Could not retrieve QSPI caps\n");
err = -EINVAL;
goto exit;
}
if (aq->caps->has_qspick) {
/* Get the QSPI system clock */
aq->qspick = devm_clk_get(&pdev->dev, "qspick");
if (IS_ERR(aq->qspick)) {
dev_err(&pdev->dev, "missing system clock\n");
err = PTR_ERR(aq->qspick);
goto disable_pclk;
}
/* Enable the QSPI system clock */
err = clk_prepare_enable(aq->qspick);
if (err) {
dev_err(&pdev->dev,
"failed to enable the QSPI system clock\n");
goto disable_pclk;
}
}
/* Request the IRQ */
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "missing IRQ\n");
err = irq;
goto disable_clk;
goto disable_qspick;
}
err = devm_request_irq(&pdev->dev, irq, atmel_qspi_interrupt,
0, dev_name(&pdev->dev), aq);
if (err)
goto disable_clk;
goto disable_qspick;
err = atmel_qspi_init(aq);
if (err)
goto disable_clk;
goto disable_qspick;
err = spi_register_controller(ctrl);
if (err)
goto disable_clk;
goto disable_qspick;
return 0;
disable_clk:
clk_disable_unprepare(aq->clk);
disable_qspick:
clk_disable_unprepare(aq->qspick);
disable_pclk:
clk_disable_unprepare(aq->pclk);
exit:
spi_controller_put(ctrl);
@ -499,8 +562,9 @@ static int atmel_qspi_remove(struct platform_device *pdev)
struct atmel_qspi *aq = spi_controller_get_devdata(ctrl);
spi_unregister_controller(ctrl);
qspi_writel(aq, QSPI_CR, QSPI_CR_QSPIDIS);
clk_disable_unprepare(aq->clk);
writel_relaxed(QSPI_CR_QSPIDIS, aq->regs + QSPI_CR);
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
return 0;
}
@ -508,7 +572,8 @@ static int __maybe_unused atmel_qspi_suspend(struct device *dev)
{
struct atmel_qspi *aq = dev_get_drvdata(dev);
clk_disable_unprepare(aq->clk);
clk_disable_unprepare(aq->qspick);
clk_disable_unprepare(aq->pclk);
return 0;
}
@ -517,7 +582,8 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
{
struct atmel_qspi *aq = dev_get_drvdata(dev);
clk_prepare_enable(aq->clk);
clk_prepare_enable(aq->pclk);
clk_prepare_enable(aq->qspick);
return atmel_qspi_init(aq);
}
@ -525,8 +591,22 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend,
atmel_qspi_resume);
static const struct atmel_qspi_caps atmel_sama5d2_qspi_caps = {};
static const struct atmel_qspi_caps atmel_sam9x60_qspi_caps = {
.has_qspick = true,
.has_ricr = true,
};
static const struct of_device_id atmel_qspi_dt_ids[] = {
{ .compatible = "atmel,sama5d2-qspi" },
{
.compatible = "atmel,sama5d2-qspi",
.data = &atmel_sama5d2_qspi_caps,
},
{
.compatible = "microchip,sam9x60-qspi",
.data = &atmel_sam9x60_qspi_caps,
},
{ /* sentinel */ }
};

View File

@ -21,18 +21,26 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/bitops.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include <asm/mach-ath79/ath79_spi_platform.h>
#include <linux/platform_data/spi-ath79.h>
#define DRV_NAME "ath79-spi"
#define ATH79_SPI_RRW_DELAY_FACTOR 12000
#define MHZ (1000 * 1000)
#define AR71XX_SPI_REG_FS 0x00 /* Function Select */
#define AR71XX_SPI_REG_CTRL 0x04 /* SPI Control */
#define AR71XX_SPI_REG_IOC 0x08 /* SPI I/O Control */
#define AR71XX_SPI_REG_RDS 0x0c /* Read Data Shift */
#define AR71XX_SPI_FS_GPIO BIT(0) /* Enable GPIO mode */
#define AR71XX_SPI_IOC_DO BIT(0) /* Data Out pin */
#define AR71XX_SPI_IOC_CLK BIT(8) /* CLK pin */
#define AR71XX_SPI_IOC_CS(n) BIT(16 + (n))
struct ath79_spi {
struct spi_bitbang bitbang;
u32 ioc_base;
@ -67,31 +75,14 @@ static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
if (is_active) {
/* set initial clock polarity */
if (spi->mode & SPI_CPOL)
sp->ioc_base |= AR71XX_SPI_IOC_CLK;
else
sp->ioc_base &= ~AR71XX_SPI_IOC_CLK;
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
if (gpio_is_valid(spi->cs_gpio)) {
/* SPI is normally active-low */
gpio_set_value_cansleep(spi->cs_gpio, cs_high);
} else {
u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
if (cs_high)
sp->ioc_base |= cs_bit;
else
sp->ioc_base &= ~cs_bit;
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
if (cs_high)
sp->ioc_base |= cs_bit;
else
sp->ioc_base &= ~cs_bit;
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
static void ath79_spi_enable(struct ath79_spi *sp)
@ -103,6 +94,9 @@ static void ath79_spi_enable(struct ath79_spi *sp)
sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
/* clear clk and mosi in the base state */
sp->ioc_base &= ~(AR71XX_SPI_IOC_DO | AR71XX_SPI_IOC_CLK);
/* TODO: setup speed? */
ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
}
@ -115,66 +109,6 @@ static void ath79_spi_disable(struct ath79_spi *sp)
ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
}
static int ath79_spi_setup_cs(struct spi_device *spi)
{
struct ath79_spi *sp = ath79_spidev_to_sp(spi);
int status;
status = 0;
if (gpio_is_valid(spi->cs_gpio)) {
unsigned long flags;
flags = GPIOF_DIR_OUT;
if (spi->mode & SPI_CS_HIGH)
flags |= GPIOF_INIT_LOW;
else
flags |= GPIOF_INIT_HIGH;
status = gpio_request_one(spi->cs_gpio, flags,
dev_name(&spi->dev));
} else {
u32 cs_bit = AR71XX_SPI_IOC_CS(spi->chip_select);
if (spi->mode & SPI_CS_HIGH)
sp->ioc_base &= ~cs_bit;
else
sp->ioc_base |= cs_bit;
ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
}
return status;
}
static void ath79_spi_cleanup_cs(struct spi_device *spi)
{
if (gpio_is_valid(spi->cs_gpio))
gpio_free(spi->cs_gpio);
}
static int ath79_spi_setup(struct spi_device *spi)
{
int status = 0;
if (!spi->controller_state) {
status = ath79_spi_setup_cs(spi);
if (status)
return status;
}
status = spi_bitbang_setup(spi);
if (status && !spi->controller_state)
ath79_spi_cleanup_cs(spi);
return status;
}
static void ath79_spi_cleanup(struct spi_device *spi)
{
ath79_spi_cleanup_cs(spi);
spi_bitbang_cleanup(spi);
}
static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned int nsecs,
u32 word, u8 bits, unsigned flags)
{
@ -225,9 +159,10 @@ static int ath79_spi_probe(struct platform_device *pdev)
pdata = dev_get_platdata(&pdev->dev);
master->use_gpio_descriptors = true;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
master->setup = ath79_spi_setup;
master->cleanup = ath79_spi_cleanup;
master->setup = spi_bitbang_setup;
master->cleanup = spi_bitbang_cleanup;
if (pdata) {
master->bus_num = pdata->bus_num;
master->num_chipselect = pdata->num_chipselect;
@ -236,7 +171,6 @@ static int ath79_spi_probe(struct platform_device *pdev)
sp->bitbang.master = master;
sp->bitbang.chipselect = ath79_spi_chipselect;
sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
sp->bitbang.flags = SPI_CS_HIGH;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

View File

@ -23,8 +23,7 @@
#include <linux/of.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pinctrl/consumer.h>
#include <linux/pm_runtime.h>
@ -312,7 +311,7 @@ struct atmel_spi {
/* Controller-specific per-slave state */
struct atmel_spi_device {
unsigned int npcs_pin;
struct gpio_desc *npcs_pin;
u32 csr;
};
@ -355,7 +354,6 @@ static bool atmel_spi_is_v2(struct atmel_spi *as)
static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
unsigned active = spi->mode & SPI_CS_HIGH;
u32 mr;
if (atmel_spi_is_v2(as)) {
@ -379,7 +377,7 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
mr = spi_readl(as, MR);
if (as->use_cs_gpios)
gpio_set_value(asd->npcs_pin, active);
gpiod_set_value(asd->npcs_pin, 1);
} else {
u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
int i;
@ -396,19 +394,16 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
mr = spi_readl(as, MR);
mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
if (as->use_cs_gpios && spi->chip_select != 0)
gpio_set_value(asd->npcs_pin, active);
gpiod_set_value(asd->npcs_pin, 1);
spi_writel(as, MR, mr);
}
dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
asd->npcs_pin, active ? " (high)" : "",
mr);
dev_dbg(&spi->dev, "activate NPCS, mr %08x\n", mr);
}
static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
{
struct atmel_spi_device *asd = spi->controller_state;
unsigned active = spi->mode & SPI_CS_HIGH;
u32 mr;
/* only deactivate *this* device; sometimes transfers to
@ -420,14 +415,12 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
spi_writel(as, MR, mr);
}
dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
asd->npcs_pin, active ? " (low)" : "",
mr);
dev_dbg(&spi->dev, "DEactivate NPCS, mr %08x\n", mr);
if (!as->use_cs_gpios)
spi_writel(as, CR, SPI_BIT(LASTXFER));
else if (atmel_spi_is_v2(as) || spi->chip_select != 0)
gpio_set_value(asd->npcs_pin, !active);
gpiod_set_value(asd->npcs_pin, 0);
}
static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@ -1188,7 +1181,6 @@ static int atmel_spi_setup(struct spi_device *spi)
struct atmel_spi_device *asd;
u32 csr;
unsigned int bits = spi->bits_per_word;
unsigned int npcs_pin;
as = spi_master_get_devdata(spi->master);
@ -1209,21 +1201,14 @@ static int atmel_spi_setup(struct spi_device *spi)
csr |= SPI_BIT(CSAAT);
/* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
*
* DLYBCT would add delays between words, slowing down transfers.
* It could potentially be useful to cope with DMA bottlenecks, but
* in those cases it's probably best to just use a lower bitrate.
*/
csr |= SPI_BF(DLYBS, 0);
csr |= SPI_BF(DLYBCT, 0);
/* chipselect must have been muxed as GPIO (e.g. in board setup) */
npcs_pin = (unsigned long)spi->controller_data;
if (!as->use_cs_gpios)
npcs_pin = spi->chip_select;
else if (gpio_is_valid(spi->cs_gpio))
npcs_pin = spi->cs_gpio;
/* DLYBCT adds delays between words. This is useful for slow devices
* that need a bit of time to setup the next transfer.
*/
csr |= SPI_BF(DLYBCT,
(as->spi_clk / 1000000 * spi->word_delay_usecs) >> 5);
asd = spi->controller_state;
if (!asd) {
@ -1231,11 +1216,21 @@ static int atmel_spi_setup(struct spi_device *spi)
if (!asd)
return -ENOMEM;
if (as->use_cs_gpios)
gpio_direction_output(npcs_pin,
!(spi->mode & SPI_CS_HIGH));
/*
* If use_cs_gpios is true this means that we have "cs-gpios"
* defined in the device tree node so we should have
* gotten the GPIO lines from the device tree inside the
* SPI core. Warn if this is not the case but continue since
* CS GPIOs are after all optional.
*/
if (as->use_cs_gpios) {
if (!spi->cs_gpiod) {
dev_err(&spi->dev,
"host claims to use CS GPIOs but no CS found in DT by the SPI core\n");
}
asd->npcs_pin = spi->cs_gpiod;
}
asd->npcs_pin = npcs_pin;
spi->controller_state = asd;
}
@ -1473,41 +1468,6 @@ static void atmel_get_caps(struct atmel_spi *as)
as->caps.has_pdc_support = version < 0x212;
}
/*-------------------------------------------------------------------------*/
static int atmel_spi_gpio_cs(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct atmel_spi *as = spi_master_get_devdata(master);
struct device_node *np = master->dev.of_node;
int i;
int ret = 0;
int nb = 0;
if (!as->use_cs_gpios)
return 0;
if (!np)
return 0;
nb = of_gpio_named_count(np, "cs-gpios");
for (i = 0; i < nb; i++) {
int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
"cs-gpios", i);
if (cs_gpio == -EPROBE_DEFER)
return cs_gpio;
if (gpio_is_valid(cs_gpio)) {
ret = devm_gpio_request(&pdev->dev, cs_gpio,
dev_name(&pdev->dev));
if (ret)
return ret;
}
}
return 0;
}
static void atmel_spi_init(struct atmel_spi *as)
{
spi_writel(as, CR, SPI_BIT(SWRST));
@ -1560,6 +1520,7 @@ static int atmel_spi_probe(struct platform_device *pdev)
goto out_free;
/* the spi->mode bits understood by this driver: */
master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->dev.of_node = pdev->dev.of_node;
@ -1592,6 +1553,11 @@ static int atmel_spi_probe(struct platform_device *pdev)
atmel_get_caps(as);
/*
* If there are chip selects in the device tree, those will be
* discovered by the SPI core when registering the SPI master
* and assigned to each SPI device.
*/
as->use_cs_gpios = true;
if (atmel_spi_is_v2(as) &&
pdev->dev.of_node &&
@ -1600,10 +1566,6 @@ static int atmel_spi_probe(struct platform_device *pdev)
master->num_chipselect = 4;
}
ret = atmel_spi_gpio_cs(pdev);
if (ret)
goto out_unmap_regs;
as->use_dma = false;
as->use_pdc = false;
if (as->caps.has_dma_support) {

View File

@ -456,7 +456,7 @@ static int bcm2835aux_spi_probe(struct platform_device *pdev)
}
bs->clk = devm_clk_get(&pdev->dev, NULL);
if ((!bs->clk) || (IS_ERR(bs->clk))) {
if (IS_ERR(bs->clk)) {
err = PTR_ERR(bs->clk);
dev_err(&pdev->dev, "could not get clk: %d\n", err);
goto out_master_put;

View File

@ -213,19 +213,6 @@ int spi_bitbang_setup(struct spi_device *spi)
dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
/* NOTE we _need_ to call chipselect() early, ideally with adapter
* setup, unless the hardware defaults cooperate to avoid confusion
* between normal (active low) and inverted chipselects.
*/
/* deselect chip (low or high) */
mutex_lock(&bitbang->lock);
if (!bitbang->busy) {
bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
ndelay(cs->nsecs);
}
mutex_unlock(&bitbang->lock);
return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_setup);

View File

@ -13,7 +13,7 @@
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
@ -128,10 +128,6 @@ struct cdns_spi {
u32 is_decoded_cs;
};
struct cdns_spi_device_data {
bool gpio_requested;
};
/* Macros for the SPI controller read/write */
static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
{
@ -176,16 +172,16 @@ static void cdns_spi_init_hw(struct cdns_spi *xspi)
/**
* cdns_spi_chipselect - Select or deselect the chip select line
* @spi: Pointer to the spi_device structure
* @is_high: Select(0) or deselect (1) the chip select line
* @enable: Select (1) or deselect (0) the chip select line
*/
static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
static void cdns_spi_chipselect(struct spi_device *spi, bool enable)
{
struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
u32 ctrl_reg;
ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR);
if (is_high) {
if (!enable) {
/* Deselect the slave */
ctrl_reg |= CDNS_SPI_CR_SSCTRL;
} else {
@ -469,64 +465,6 @@ static int cdns_unprepare_transfer_hardware(struct spi_master *master)
return 0;
}
static int cdns_spi_setup(struct spi_device *spi)
{
int ret = -EINVAL;
struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi);
/* this is a pin managed by the controller, leave it alone */
if (spi->cs_gpio == -ENOENT)
return 0;
/* this seems to be the first time we're here */
if (!cdns_spi_data) {
cdns_spi_data = kzalloc(sizeof(*cdns_spi_data), GFP_KERNEL);
if (!cdns_spi_data)
return -ENOMEM;
cdns_spi_data->gpio_requested = false;
spi_set_ctldata(spi, cdns_spi_data);
}
/* if we haven't done so, grab the gpio */
if (!cdns_spi_data->gpio_requested && gpio_is_valid(spi->cs_gpio)) {
ret = gpio_request_one(spi->cs_gpio,
(spi->mode & SPI_CS_HIGH) ?
GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
dev_name(&spi->dev));
if (ret)
dev_err(&spi->dev, "can't request chipselect gpio %d\n",
spi->cs_gpio);
else
cdns_spi_data->gpio_requested = true;
} else {
if (gpio_is_valid(spi->cs_gpio)) {
int mode = ((spi->mode & SPI_CS_HIGH) ?
GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH);
ret = gpio_direction_output(spi->cs_gpio, mode);
if (ret)
dev_err(&spi->dev, "chipselect gpio %d setup failed (%d)\n",
spi->cs_gpio, ret);
}
}
return ret;
}
static void cdns_spi_cleanup(struct spi_device *spi)
{
struct cdns_spi_device_data *cdns_spi_data = spi_get_ctldata(spi);
if (cdns_spi_data) {
if (cdns_spi_data->gpio_requested)
gpio_free(spi->cs_gpio);
kfree(cdns_spi_data);
spi_set_ctldata(spi, NULL);
}
}
/**
* cdns_spi_probe - Probe method for the SPI driver
* @pdev: Pointer to the platform_device structure
@ -584,11 +522,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_apb;
}
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
if (ret < 0)
master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@ -603,8 +536,10 @@ static int cdns_spi_probe(struct platform_device *pdev)
/* SPI controller initializations */
cdns_spi_init_hw(xspi);
pm_runtime_mark_last_busy(&pdev->dev);
pm_runtime_put_autosuspend(&pdev->dev);
pm_runtime_set_active(&pdev->dev);
pm_runtime_enable(&pdev->dev);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
@ -621,13 +556,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
goto clk_dis_all;
}
master->use_gpio_descriptors = true;
master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
master->prepare_message = cdns_prepare_message;
master->transfer_one = cdns_transfer_one;
master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
master->set_cs = cdns_spi_chipselect;
master->setup = cdns_spi_setup;
master->cleanup = cdns_spi_cleanup;
master->auto_runtime_pm = true;
master->mode_bits = SPI_CPOL | SPI_CPHA;

View File

@ -11,7 +11,7 @@
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
@ -36,25 +36,6 @@ struct spi_clps711x_data {
int len;
};
static int spi_clps711x_setup(struct spi_device *spi)
{
if (!spi->controller_state) {
int ret;
ret = devm_gpio_request(&spi->master->dev, spi->cs_gpio,
dev_name(&spi->master->dev));
if (ret)
return ret;
spi->controller_state = spi;
}
/* We are expect that SPI-device is not selected */
gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
return 0;
}
static int spi_clps711x_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@ -125,11 +106,11 @@ static int spi_clps711x_probe(struct platform_device *pdev)
if (!master)
return -ENOMEM;
master->use_gpio_descriptors = true;
master->bus_num = -1;
master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
master->dev.of_node = pdev->dev.of_node;
master->setup = spi_clps711x_setup;
master->prepare_message = spi_clps711x_prepare_message;
master->transfer_one = spi_clps711x_transfer_one;

View File

@ -15,7 +15,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi_bitbang.h>
#include <linux/slab.h>
@ -222,12 +221,17 @@ static void davinci_spi_chipselect(struct spi_device *spi, int value)
* Board specific chip select logic decides the polarity and cs
* line for the controller
*/
if (spi->cs_gpio >= 0) {
if (spi->cs_gpiod) {
/*
* FIXME: is this code ever executed? This host does not
* set SPI_MASTER_GPIO_SS so this chipselect callback should
* not get called from the SPI core when we are using
* GPIOs for chip select.
*/
if (value == BITBANG_CS_ACTIVE)
gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH);
gpiod_set_value(spi->cs_gpiod, 1);
else
gpio_set_value(spi->cs_gpio,
!(spi->mode & SPI_CS_HIGH));
gpiod_set_value(spi->cs_gpiod, 0);
} else {
if (value == BITBANG_CS_ACTIVE) {
if (!(spi->mode & SPI_CS_WORD))
@ -418,30 +422,18 @@ static int davinci_spi_of_setup(struct spi_device *spi)
*/
static int davinci_spi_setup(struct spi_device *spi)
{
int retval = 0;
struct davinci_spi *dspi;
struct spi_master *master = spi->master;
struct device_node *np = spi->dev.of_node;
bool internal_cs = true;
dspi = spi_master_get_devdata(spi->master);
if (!(spi->mode & SPI_NO_CS)) {
if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
retval = gpio_direction_output(
spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
if (np && spi->cs_gpiod)
internal_cs = false;
}
if (retval) {
dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
spi->cs_gpio, retval);
return retval;
}
if (internal_cs) {
if (internal_cs)
set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
}
}
if (spi->mode & SPI_READY)
@ -962,6 +954,7 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (ret)
goto free_master;
master->use_gpio_descriptors = true;
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
master->num_chipselect = pdata->num_chipselect;
@ -980,27 +973,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
if (dspi->version == SPI_VERSION_2)
dspi->bitbang.flags |= SPI_READY;
if (pdev->dev.of_node) {
int i;
for (i = 0; i < pdata->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
"cs-gpios", i);
if (cs_gpio == -EPROBE_DEFER) {
ret = cs_gpio;
goto free_clk;
}
if (gpio_is_valid(cs_gpio)) {
ret = devm_gpio_request(&pdev->dev, cs_gpio,
dev_name(&pdev->dev));
if (ret)
goto free_clk;
}
}
}
dspi->bitbang.txrx_bufs = davinci_spi_bufs;
ret = davinci_spi_request_dma(dspi);

View File

@ -18,7 +18,6 @@
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/of_platform.h>
#include <linux/acpi.h>
#include <linux/property.h>
@ -185,27 +184,6 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
dws->num_cs = num_cs;
if (pdev->dev.of_node) {
int i;
for (i = 0; i < dws->num_cs; i++) {
int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
"cs-gpios", i);
if (cs_gpio == -EPROBE_DEFER) {
ret = cs_gpio;
goto out;
}
if (gpio_is_valid(cs_gpio)) {
ret = devm_gpio_request(&pdev->dev, cs_gpio,
dev_name(&pdev->dev));
if (ret)
goto out;
}
}
}
init_func = device_get_match_data(&pdev->dev);
if (init_func) {
ret = init_func(pdev, dwsmmio);

View File

@ -20,7 +20,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include "spi-dw.h"
@ -138,11 +137,10 @@ void dw_spi_set_cs(struct spi_device *spi, bool enable)
struct dw_spi *dws = spi_controller_get_devdata(spi->controller);
struct chip_data *chip = spi_get_ctldata(spi);
/* Chip select logic is inverted from spi_set_cs() */
if (chip && chip->cs_control)
chip->cs_control(!enable);
chip->cs_control(enable);
if (!enable)
if (enable)
dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
else if (dws->cs_override)
dw_writel(dws, DW_SPI_SER, 0);
@ -317,7 +315,8 @@ static int dw_spi_transfer_one(struct spi_controller *master,
/* Default SPI mode is SCPOL = 0, SCPH = 0 */
cr0 = (transfer->bits_per_word - 1)
| (chip->type << SPI_FRF_OFFSET)
| (spi->mode << SPI_MODE_OFFSET)
| ((((spi->mode & SPI_CPOL) ? 1 : 0) << SPI_SCOL_OFFSET) |
(((spi->mode & SPI_CPHA) ? 1 : 0) << SPI_SCPH_OFFSET))
| (chip->tmode << SPI_TMOD_OFFSET);
/*
@ -397,7 +396,6 @@ static int dw_spi_setup(struct spi_device *spi)
{
struct dw_spi_chip *chip_info = NULL;
struct chip_data *chip;
int ret;
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
@ -425,13 +423,6 @@ static int dw_spi_setup(struct spi_device *spi)
chip->tmode = SPI_TMOD_TR;
if (gpio_is_valid(spi->cs_gpio)) {
ret = gpio_direction_output(spi->cs_gpio,
!(spi->mode & SPI_CS_HIGH));
if (ret)
return ret;
}
return 0;
}
@ -496,6 +487,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
goto err_free_master;
}
master->use_gpio_descriptors = true;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
master->bus_num = dws->bus_num;

View File

@ -233,6 +233,9 @@ static u32 dspi_pop_tx_pushr(struct fsl_dspi *dspi)
{
u16 cmd = dspi->tx_cmd, data = dspi_pop_tx(dspi);
if (spi_controller_is_slave(dspi->master))
return data;
if (dspi->len > 0)
cmd |= SPI_PUSHR_CMD_CONT;
return cmd << 16 | data;
@ -329,6 +332,11 @@ static int dspi_next_xfer_dma_submit(struct fsl_dspi *dspi)
dma_async_issue_pending(dma->chan_rx);
dma_async_issue_pending(dma->chan_tx);
if (spi_controller_is_slave(dspi->master)) {
wait_for_completion_interruptible(&dspi->dma->cmd_rx_complete);
return 0;
}
time_left = wait_for_completion_timeout(&dspi->dma->cmd_tx_complete,
DMA_COMPLETION_TIMEOUT);
if (time_left == 0) {
@ -798,14 +806,18 @@ static int dspi_setup(struct spi_device *spi)
ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
chip->ctar_val = SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
| SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
| SPI_CTAR_PCSSCK(pcssck)
| SPI_CTAR_CSSCK(cssck)
| SPI_CTAR_PASC(pasc)
| SPI_CTAR_ASC(asc)
| SPI_CTAR_PBR(pbr)
| SPI_CTAR_BR(br);
| SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0);
if (!spi_controller_is_slave(dspi->master)) {
chip->ctar_val |= SPI_CTAR_LSBFE(spi->mode &
SPI_LSB_FIRST ? 1 : 0)
| SPI_CTAR_PCSSCK(pcssck)
| SPI_CTAR_CSSCK(cssck)
| SPI_CTAR_PASC(pasc)
| SPI_CTAR_ASC(asc)
| SPI_CTAR_PBR(pbr)
| SPI_CTAR_BR(br);
}
spi_set_ctldata(spi, chip);
@ -970,8 +982,13 @@ static const struct regmap_config dspi_xspi_regmap_config[] = {
static void dspi_init(struct fsl_dspi *dspi)
{
regmap_write(dspi->regmap, SPI_MCR, SPI_MCR_MASTER | SPI_MCR_PCSIS |
(dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0));
unsigned int mcr = SPI_MCR_PCSIS |
(dspi->devtype_data->xspi_mode ? SPI_MCR_XSPI : 0);
if (!spi_controller_is_slave(dspi->master))
mcr |= SPI_MCR_MASTER;
regmap_write(dspi->regmap, SPI_MCR, mcr);
regmap_write(dspi->regmap, SPI_SR, SPI_SR_CLEAR);
if (dspi->devtype_data->xspi_mode)
regmap_write(dspi->regmap, SPI_CTARE(0),
@ -1027,6 +1044,9 @@ static int dspi_probe(struct platform_device *pdev)
}
master->bus_num = bus_num;
if (of_property_read_bool(np, "spi-slave"))
master->slave = true;
dspi->devtype_data = of_device_get_match_data(&pdev->dev);
if (!dspi->devtype_data) {
dev_err(&pdev->dev, "can't get devtype_data\n");

966 drivers/spi/spi-fsl-qspi.c Normal file
View File

@ -0,0 +1,966 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Freescale QuadSPI driver.
*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
* Copyright (C) 2018 Bootlin
* Copyright (C) 2018 exceet electronics GmbH
* Copyright (C) 2018 Kontron Electronics GmbH
*
* Transition to SPI MEM interface:
* Authors:
* Boris Brezillon <bbrezillon@kernel.org>
* Frieder Schrempf <frieder.schrempf@kontron.de>
* Yogesh Gaur <yogeshnarayan.gaur@nxp.com>
* Suresh Gupta <suresh.gupta@nxp.com>
*
* Based on the original fsl-quadspi.c spi-nor driver:
* Author: Freescale Semiconductor, Inc.
*
*/
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_qos.h>
#include <linux/sizes.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
/*
* The driver only uses a single LUT entry, which is updated on
* each call of exec_op(). Index 0 is preset at boot with a basic
* read operation, so let's use the last entry (15).
*/
#define SEQID_LUT 15
/* Registers used by the driver */
#define QUADSPI_MCR 0x00
#define QUADSPI_MCR_RESERVED_MASK GENMASK(19, 16)
#define QUADSPI_MCR_MDIS_MASK BIT(14)
#define QUADSPI_MCR_CLR_TXF_MASK BIT(11)
#define QUADSPI_MCR_CLR_RXF_MASK BIT(10)
#define QUADSPI_MCR_DDR_EN_MASK BIT(7)
#define QUADSPI_MCR_END_CFG_MASK GENMASK(3, 2)
#define QUADSPI_MCR_SWRSTHD_MASK BIT(1)
#define QUADSPI_MCR_SWRSTSD_MASK BIT(0)
#define QUADSPI_IPCR 0x08
#define QUADSPI_IPCR_SEQID(x) ((x) << 24)
#define QUADSPI_BUF3CR 0x1c
#define QUADSPI_BUF3CR_ALLMST_MASK BIT(31)
#define QUADSPI_BUF3CR_ADATSZ(x) ((x) << 8)
#define QUADSPI_BUF3CR_ADATSZ_MASK GENMASK(15, 8)
#define QUADSPI_BFGENCR 0x20
#define QUADSPI_BFGENCR_SEQID(x) ((x) << 12)
#define QUADSPI_BUF0IND 0x30
#define QUADSPI_BUF1IND 0x34
#define QUADSPI_BUF2IND 0x38
#define QUADSPI_SFAR 0x100
#define QUADSPI_SMPR 0x108
#define QUADSPI_SMPR_DDRSMP_MASK GENMASK(18, 16)
#define QUADSPI_SMPR_FSDLY_MASK BIT(6)
#define QUADSPI_SMPR_FSPHS_MASK BIT(5)
#define QUADSPI_SMPR_HSENA_MASK BIT(0)
#define QUADSPI_RBCT 0x110
#define QUADSPI_RBCT_WMRK_MASK GENMASK(4, 0)
#define QUADSPI_RBCT_RXBRD_USEIPS BIT(8)
#define QUADSPI_TBDR 0x154
#define QUADSPI_SR 0x15c
#define QUADSPI_SR_IP_ACC_MASK BIT(1)
#define QUADSPI_SR_AHB_ACC_MASK BIT(2)
#define QUADSPI_FR 0x160
#define QUADSPI_FR_TFF_MASK BIT(0)
#define QUADSPI_SPTRCLR 0x16c
#define QUADSPI_SPTRCLR_IPPTRC BIT(8)
#define QUADSPI_SPTRCLR_BFPTRC BIT(0)
#define QUADSPI_SFA1AD 0x180
#define QUADSPI_SFA2AD 0x184
#define QUADSPI_SFB1AD 0x188
#define QUADSPI_SFB2AD 0x18c
#define QUADSPI_RBDR(x) (0x200 + ((x) * 4))
#define QUADSPI_LUTKEY 0x300
#define QUADSPI_LUTKEY_VALUE 0x5AF05AF0
#define QUADSPI_LCKCR 0x304
#define QUADSPI_LCKER_LOCK BIT(0)
#define QUADSPI_LCKER_UNLOCK BIT(1)
#define QUADSPI_RSER 0x164
#define QUADSPI_RSER_TFIE BIT(0)
#define QUADSPI_LUT_BASE 0x310
#define QUADSPI_LUT_OFFSET (SEQID_LUT * 4 * 4)
#define QUADSPI_LUT_REG(idx) \
(QUADSPI_LUT_BASE + QUADSPI_LUT_OFFSET + (idx) * 4)
/* Instruction set for the LUT register */
#define LUT_STOP 0
#define LUT_CMD 1
#define LUT_ADDR 2
#define LUT_DUMMY 3
#define LUT_MODE 4
#define LUT_MODE2 5
#define LUT_MODE4 6
#define LUT_FSL_READ 7
#define LUT_FSL_WRITE 8
#define LUT_JMP_ON_CS 9
#define LUT_ADDR_DDR 10
#define LUT_MODE_DDR 11
#define LUT_MODE2_DDR 12
#define LUT_MODE4_DDR 13
#define LUT_FSL_READ_DDR 14
#define LUT_FSL_WRITE_DDR 15
#define LUT_DATA_LEARN 16
/*
* The PAD definitions for LUT register.
*
* The pad stands for the number of IO lines [0:3].
* For example, the quad read needs four IO lines,
* so you should use LUT_PAD(4).
*/
#define LUT_PAD(x) (fls(x) - 1)
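Editor's note: a quick worked illustration of the pad encoding, not part of the patch; the values follow directly from fls():

/* LUT_PAD(1) = fls(1) - 1 = 0 -> single I/O line */
/* LUT_PAD(2) = fls(2) - 1 = 1 -> dual */
/* LUT_PAD(4) = fls(4) - 1 = 2 -> quad */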
/*
* Macro for constructing the LUT entries with the following
* register layout:
*
* ---------------------------------------------------
* | INSTR1 | PAD1 | OPRND1 | INSTR0 | PAD0 | OPRND0 |
* ---------------------------------------------------
*/
#define LUT_DEF(idx, ins, pad, opr) \
((((ins) << 10) | ((pad) << 8) | (opr)) << (((idx) % 2) * 16))
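Editor's note: a worked example of the packing above, not part of the patch; the 0x6b opcode is only an illustrative quad-output fast read:

/*
 * LUT_DEF(0, LUT_CMD, LUT_PAD(1), 0x6b)
 *     = (1 << 10) | (0 << 8) | 0x6b        = 0x0000046b (low half)
 * LUT_DEF(1, LUT_FSL_READ, LUT_PAD(4), 0)
 *     = ((7 << 10) | (2 << 8) | 0) << 16   = 0x1e000000 (high half)
 * lutval[0] = 0x1e00046b: send 0x6b on one line, then read on four lines.
 */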
/* Controller needs driver to swap endianness */
#define QUADSPI_QUIRK_SWAP_ENDIAN BIT(0)
/* Controller needs 4x internal clock */
#define QUADSPI_QUIRK_4X_INT_CLK BIT(1)
/*
* TKT253890, the controller needs the driver to fill the txfifo with
* 16 bytes at least to trigger a data transfer, even though the extra
* data won't be transferred.
*/
#define QUADSPI_QUIRK_TKT253890 BIT(2)
/* TKT245618, the controller cannot wake up from wait mode */
#define QUADSPI_QUIRK_TKT245618 BIT(3)
/*
* Controller adds QSPI_AMBA_BASE (base address of the mapped memory)
* internally. No need to add it when setting SFXXAD and SFAR registers
*/
#define QUADSPI_QUIRK_BASE_INTERNAL BIT(4)
struct fsl_qspi_devtype_data {
unsigned int rxfifo;
unsigned int txfifo;
unsigned int ahb_buf_size;
unsigned int quirks;
bool little_endian;
};
static const struct fsl_qspi_devtype_data vybrid_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_SWAP_ENDIAN,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data imx6sx_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_4X_INT_CLK | QUADSPI_QUIRK_TKT245618,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data imx7d_data = {
.rxfifo = SZ_512,
.txfifo = SZ_512,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data imx6ul_data = {
.rxfifo = SZ_128,
.txfifo = SZ_512,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_4X_INT_CLK,
.little_endian = true,
};
static const struct fsl_qspi_devtype_data ls1021a_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
.ahb_buf_size = SZ_1K,
.quirks = 0,
.little_endian = false,
};
static const struct fsl_qspi_devtype_data ls2080a_data = {
.rxfifo = SZ_128,
.txfifo = SZ_64,
.ahb_buf_size = SZ_1K,
.quirks = QUADSPI_QUIRK_TKT253890 | QUADSPI_QUIRK_BASE_INTERNAL,
.little_endian = true,
};
struct fsl_qspi {
void __iomem *iobase;
void __iomem *ahb_addr;
u32 memmap_phy;
struct clk *clk, *clk_en;
struct device *dev;
struct completion c;
const struct fsl_qspi_devtype_data *devtype_data;
struct mutex lock;
struct pm_qos_request pm_qos_req;
int selected;
};
static inline int needs_swap_endian(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_SWAP_ENDIAN;
}
static inline int needs_4x_clock(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_4X_INT_CLK;
}
static inline int needs_fill_txfifo(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_TKT253890;
}
static inline int needs_wakeup_wait_mode(struct fsl_qspi *q)
{
return q->devtype_data->quirks & QUADSPI_QUIRK_TKT245618;
}
static inline int needs_amba_base_offset(struct fsl_qspi *q)
{
return !(q->devtype_data->quirks & QUADSPI_QUIRK_BASE_INTERNAL);
}
/*
* An IC bug makes it necessary to rearrange the 32-bit data.
* Later chips, such as IMX6SLX, have fixed this bug.
*/
static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a)
{
return needs_swap_endian(q) ? __swab32(a) : a;
}
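Editor's note, as a concrete illustration (not part of the patch):

/* e.g. fsl_qspi_endian_xchg(q, 0x12345678) == 0x78563412 when the
 * QUADSPI_QUIRK_SWAP_ENDIAN quirk is set, and 0x12345678 otherwise. */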
/*
* R/W functions for big- or little-endian registers:
* The QSPI controller's endianness is independent of
* the CPU core's endianness: even on a little-endian
* CPU core the QSPI controller can be wired as either
* big-endian or little-endian.
*/
static void qspi_writel(struct fsl_qspi *q, u32 val, void __iomem *addr)
{
if (q->devtype_data->little_endian)
iowrite32(val, addr);
else
iowrite32be(val, addr);
}
static u32 qspi_readl(struct fsl_qspi *q, void __iomem *addr)
{
if (q->devtype_data->little_endian)
return ioread32(addr);
return ioread32be(addr);
}
static irqreturn_t fsl_qspi_irq_handler(int irq, void *dev_id)
{
struct fsl_qspi *q = dev_id;
u32 reg;
/* clear interrupt */
reg = qspi_readl(q, q->iobase + QUADSPI_FR);
qspi_writel(q, reg, q->iobase + QUADSPI_FR);
if (reg & QUADSPI_FR_TFF_MASK)
complete(&q->c);
dev_dbg(q->dev, "QUADSPI_FR : 0x%.8x:0x%.8x\n", 0, reg);
return IRQ_HANDLED;
}
static int fsl_qspi_check_buswidth(struct fsl_qspi *q, u8 width)
{
switch (width) {
case 1:
case 2:
case 4:
return 0;
}
return -ENOTSUPP;
}
static bool fsl_qspi_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op)
{
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
int ret;
ret = fsl_qspi_check_buswidth(q, op->cmd.buswidth);
if (op->addr.nbytes)
ret |= fsl_qspi_check_buswidth(q, op->addr.buswidth);
if (op->dummy.nbytes)
ret |= fsl_qspi_check_buswidth(q, op->dummy.buswidth);
if (op->data.nbytes)
ret |= fsl_qspi_check_buswidth(q, op->data.buswidth);
if (ret)
return false;
/*
* The number of instructions needed for the op needs
* to fit into a single LUT entry.
*/
if (op->addr.nbytes +
(op->dummy.nbytes ? 1:0) +
(op->data.nbytes ? 1:0) > 6)
return false;
/* Max 64 dummy clock cycles supported */
if (op->dummy.nbytes &&
(op->dummy.nbytes * 8 / op->dummy.buswidth > 64))
return false;
/* Max data length, check controller limits and alignment */
if (op->data.dir == SPI_MEM_DATA_IN &&
(op->data.nbytes > q->devtype_data->ahb_buf_size ||
(op->data.nbytes > q->devtype_data->rxfifo - 4 &&
!IS_ALIGNED(op->data.nbytes, 8))))
return false;
if (op->data.dir == SPI_MEM_DATA_OUT &&
op->data.nbytes > q->devtype_data->txfifo)
return false;
return true;
}
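Editor's note: a worked illustration of the LUT-slot budget checked above, not part of the patch. A LUT sequence holds eight instruction slots; the command always occupies the first slot and the terminating STOP the last, leaving six for address bytes, dummy and data:

/*
 * 1-4-4 fast read with a 4-byte address and dummy cycles:
 *     CMD(1) + 4 x ADDR + DUMMY(1) + READ(1) + STOP(1) = 8 slots -> fits
 * The check above only counts addr + dummy + data, i.e. 4 + 1 + 1 = 6 <= 6.
 */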
static void fsl_qspi_prepare_lut(struct fsl_qspi *q,
const struct spi_mem_op *op)
{
void __iomem *base = q->iobase;
u32 lutval[4] = {};
int lutidx = 1, i;
lutval[0] |= LUT_DEF(0, LUT_CMD, LUT_PAD(op->cmd.buswidth),
op->cmd.opcode);
/*
* For some unknown reason, using LUT_ADDR doesn't work in some
* cases (at least with one-byte-long addresses), so
* let's use LUT_MODE to write the address bytes one by one
*/
for (i = 0; i < op->addr.nbytes; i++) {
u8 addrbyte = op->addr.val >> (8 * (op->addr.nbytes - i - 1));
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_MODE,
LUT_PAD(op->addr.buswidth),
addrbyte);
lutidx++;
}
if (op->dummy.nbytes) {
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_DUMMY,
LUT_PAD(op->dummy.buswidth),
op->dummy.nbytes * 8 /
op->dummy.buswidth);
lutidx++;
}
if (op->data.nbytes) {
lutval[lutidx / 2] |= LUT_DEF(lutidx,
op->data.dir == SPI_MEM_DATA_IN ?
LUT_FSL_READ : LUT_FSL_WRITE,
LUT_PAD(op->data.buswidth),
0);
lutidx++;
}
lutval[lutidx / 2] |= LUT_DEF(lutidx, LUT_STOP, 0, 0);
/* unlock LUT */
qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
qspi_writel(q, QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR);
/* fill LUT */
for (i = 0; i < ARRAY_SIZE(lutval); i++)
qspi_writel(q, lutval[i], base + QUADSPI_LUT_REG(i));
/* lock LUT */
qspi_writel(q, QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY);
qspi_writel(q, QUADSPI_LCKER_LOCK, q->iobase + QUADSPI_LCKCR);
}
static int fsl_qspi_clk_prep_enable(struct fsl_qspi *q)
{
int ret;
ret = clk_prepare_enable(q->clk_en);
if (ret)
return ret;
ret = clk_prepare_enable(q->clk);
if (ret) {
clk_disable_unprepare(q->clk_en);
return ret;
}
if (needs_wakeup_wait_mode(q))
pm_qos_add_request(&q->pm_qos_req, PM_QOS_CPU_DMA_LATENCY, 0);
return 0;
}
static void fsl_qspi_clk_disable_unprep(struct fsl_qspi *q)
{
if (needs_wakeup_wait_mode(q))
pm_qos_remove_request(&q->pm_qos_req);
clk_disable_unprepare(q->clk);
clk_disable_unprepare(q->clk_en);
}
/*
* If we have changed the content of the flash by writing or erasing, or if we
* read from flash with a different offset into the page buffer, we need to
* invalidate the AHB buffer. If we do not do so, we may read out the wrong
* data. The spec tells us to reset the AHB domain and Serial Flash domain at
* the same time.
*/
static void fsl_qspi_invalidate(struct fsl_qspi *q)
{
u32 reg;
reg = qspi_readl(q, q->iobase + QUADSPI_MCR);
reg |= QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK;
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
/*
* The minimum delay is 1 AHB + 2 SFCK clock cycles,
* so a delay of 1 us is enough.
*/
udelay(1);
reg &= ~(QUADSPI_MCR_SWRSTHD_MASK | QUADSPI_MCR_SWRSTSD_MASK);
qspi_writel(q, reg, q->iobase + QUADSPI_MCR);
}
static void fsl_qspi_select_mem(struct fsl_qspi *q, struct spi_device *spi)
{
unsigned long rate = spi->max_speed_hz;
int ret;
if (q->selected == spi->chip_select)
return;
if (needs_4x_clock(q))
rate *= 4;
fsl_qspi_clk_disable_unprep(q);
ret = clk_set_rate(q->clk, rate);
if (ret)
return;
ret = fsl_qspi_clk_prep_enable(q);
if (ret)
return;
q->selected = spi->chip_select;
fsl_qspi_invalidate(q);
}
static void fsl_qspi_read_ahb(struct fsl_qspi *q, const struct spi_mem_op *op)
{
memcpy_fromio(op->data.buf.in,
q->ahb_addr + q->selected * q->devtype_data->ahb_buf_size,
op->data.nbytes);
}
static void fsl_qspi_fill_txfifo(struct fsl_qspi *q,
const struct spi_mem_op *op)
{
void __iomem *base = q->iobase;
int i;
u32 val;
for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
memcpy(&val, op->data.buf.out + i, 4);
val = fsl_qspi_endian_xchg(q, val);
qspi_writel(q, val, base + QUADSPI_TBDR);
}
if (i < op->data.nbytes) {
memcpy(&val, op->data.buf.out + i, op->data.nbytes - i);
val = fsl_qspi_endian_xchg(q, val);
qspi_writel(q, val, base + QUADSPI_TBDR);
}
if (needs_fill_txfifo(q)) {
for (i = op->data.nbytes; i < 16; i += 4)
qspi_writel(q, 0, base + QUADSPI_TBDR);
}
}
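Editor's note: a worked illustration of the TKT253890 padding above, not part of the patch:

/*
 * op->data.nbytes = 5 with needs_fill_txfifo():
 *     whole-word loop: 1 write (bytes 0-3)
 *     tail           : 1 write (byte 4)
 *     padding loop   : 3 zero writes (i = 5, 9, 13)
 * -> 5 words (20 bytes) pushed to the TX FIFO, satisfying the
 *    16-byte minimum required by the erratum.
 */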
static void fsl_qspi_read_rxfifo(struct fsl_qspi *q,
const struct spi_mem_op *op)
{
void __iomem *base = q->iobase;
int i;
u8 *buf = op->data.buf.in;
u32 val;
for (i = 0; i < ALIGN_DOWN(op->data.nbytes, 4); i += 4) {
val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
val = fsl_qspi_endian_xchg(q, val);
memcpy(buf + i, &val, 4);
}
if (i < op->data.nbytes) {
val = qspi_readl(q, base + QUADSPI_RBDR(i / 4));
val = fsl_qspi_endian_xchg(q, val);
memcpy(buf + i, &val, op->data.nbytes - i);
}
}
static int fsl_qspi_do_op(struct fsl_qspi *q, const struct spi_mem_op *op)
{
void __iomem *base = q->iobase;
int err = 0;
init_completion(&q->c);
/*
* Always start the sequence at the same index since we update
* the LUT at each exec_op() call. Also specify the DATA
* length, since it has not been specified in the LUT.
*/
qspi_writel(q, op->data.nbytes | QUADSPI_IPCR_SEQID(SEQID_LUT),
base + QUADSPI_IPCR);
/* Wait for the interrupt. */
if (!wait_for_completion_timeout(&q->c, msecs_to_jiffies(1000)))
err = -ETIMEDOUT;
if (!err && op->data.nbytes && op->data.dir == SPI_MEM_DATA_IN)
fsl_qspi_read_rxfifo(q, op);
return err;
}
static int fsl_qspi_readl_poll_tout(struct fsl_qspi *q, void __iomem *base,
u32 mask, u32 delay_us, u32 timeout_us)
{
u32 reg;
if (!q->devtype_data->little_endian)
mask = (u32)cpu_to_be32(mask);
return readl_poll_timeout(base, reg, !(reg & mask), delay_us,
timeout_us);
}
static int fsl_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
void __iomem *base = q->iobase;
u32 addr_offset = 0;
int err = 0;
mutex_lock(&q->lock);
/* Wait for the controller to be ready */
fsl_qspi_readl_poll_tout(q, base + QUADSPI_SR, (QUADSPI_SR_IP_ACC_MASK |
QUADSPI_SR_AHB_ACC_MASK), 10, 1000);
fsl_qspi_select_mem(q, mem->spi);
if (needs_amba_base_offset(q))
addr_offset = q->memmap_phy;
qspi_writel(q,
q->selected * q->devtype_data->ahb_buf_size + addr_offset,
base + QUADSPI_SFAR);
qspi_writel(q, qspi_readl(q, base + QUADSPI_MCR) |
QUADSPI_MCR_CLR_RXF_MASK | QUADSPI_MCR_CLR_TXF_MASK,
base + QUADSPI_MCR);
qspi_writel(q, QUADSPI_SPTRCLR_BFPTRC | QUADSPI_SPTRCLR_IPPTRC,
base + QUADSPI_SPTRCLR);
fsl_qspi_prepare_lut(q, op);
/*
* If we have large chunks of data, we read them through the AHB bus
* by accessing the mapped memory. In all other cases we use
* IP commands to access the flash.
*/
if (op->data.nbytes > (q->devtype_data->rxfifo - 4) &&
op->data.dir == SPI_MEM_DATA_IN) {
fsl_qspi_read_ahb(q, op);
} else {
qspi_writel(q, QUADSPI_RBCT_WMRK_MASK |
QUADSPI_RBCT_RXBRD_USEIPS, base + QUADSPI_RBCT);
if (op->data.nbytes && op->data.dir == SPI_MEM_DATA_OUT)
fsl_qspi_fill_txfifo(q, op);
err = fsl_qspi_do_op(q, op);
}
/* Invalidate the data in the AHB buffer. */
fsl_qspi_invalidate(q);
mutex_unlock(&q->lock);
return err;
}
static int fsl_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
if (op->data.dir == SPI_MEM_DATA_OUT) {
if (op->data.nbytes > q->devtype_data->txfifo)
op->data.nbytes = q->devtype_data->txfifo;
} else {
if (op->data.nbytes > q->devtype_data->ahb_buf_size)
op->data.nbytes = q->devtype_data->ahb_buf_size;
else if (op->data.nbytes > (q->devtype_data->rxfifo - 4))
op->data.nbytes = ALIGN_DOWN(op->data.nbytes, 8);
}
return 0;
}
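Editor's note: an illustration of the clamping above, not part of the patch, assuming the imx7d_data limits (rxfifo 512, txfifo 512, ahb_buf_size 1K):

/*
 * 2000-byte read  -> clamped to 1024 (AHB buffer size)
 * 700-byte read   -> larger than rxfifo - 4, rounded down to 696 (8-byte aligned)
 * 300-byte read   -> fits the IP read path, left untouched
 * 2000-byte write -> clamped to 512 (TX FIFO size)
 */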
static int fsl_qspi_default_setup(struct fsl_qspi *q)
{
void __iomem *base = q->iobase;
u32 reg, addr_offset = 0;
int ret;
/* disable and unprepare clock to avoid glitch pass to controller */
fsl_qspi_clk_disable_unprep(q);
/* Set the default frequency; we will change it later if necessary. */
ret = clk_set_rate(q->clk, 66000000);
if (ret)
return ret;
ret = fsl_qspi_clk_prep_enable(q);
if (ret)
return ret;
/* Reset the module */
qspi_writel(q, QUADSPI_MCR_SWRSTSD_MASK | QUADSPI_MCR_SWRSTHD_MASK,
base + QUADSPI_MCR);
udelay(1);
/* Disable the module */
qspi_writel(q, QUADSPI_MCR_MDIS_MASK | QUADSPI_MCR_RESERVED_MASK,
base + QUADSPI_MCR);
reg = qspi_readl(q, base + QUADSPI_SMPR);
qspi_writel(q, reg & ~(QUADSPI_SMPR_FSDLY_MASK
| QUADSPI_SMPR_FSPHS_MASK
| QUADSPI_SMPR_HSENA_MASK
| QUADSPI_SMPR_DDRSMP_MASK), base + QUADSPI_SMPR);
/* We only use the buffer3 for AHB read */
qspi_writel(q, 0, base + QUADSPI_BUF0IND);
qspi_writel(q, 0, base + QUADSPI_BUF1IND);
qspi_writel(q, 0, base + QUADSPI_BUF2IND);
qspi_writel(q, QUADSPI_BFGENCR_SEQID(SEQID_LUT),
q->iobase + QUADSPI_BFGENCR);
qspi_writel(q, QUADSPI_RBCT_WMRK_MASK, base + QUADSPI_RBCT);
qspi_writel(q, QUADSPI_BUF3CR_ALLMST_MASK |
QUADSPI_BUF3CR_ADATSZ(q->devtype_data->ahb_buf_size / 8),
base + QUADSPI_BUF3CR);
if (needs_amba_base_offset(q))
addr_offset = q->memmap_phy;
/*
* In HW there can be a maximum of four chips on two buses with
* two chip selects on each bus. We use four chip selects in SW
* to differentiate between the four chips.
* We use ahb_buf_size for each chip and set SFA1AD, SFA2AD, SFB1AD,
* SFB2AD accordingly.
*/
qspi_writel(q, q->devtype_data->ahb_buf_size + addr_offset,
base + QUADSPI_SFA1AD);
qspi_writel(q, q->devtype_data->ahb_buf_size * 2 + addr_offset,
base + QUADSPI_SFA2AD);
qspi_writel(q, q->devtype_data->ahb_buf_size * 3 + addr_offset,
base + QUADSPI_SFB1AD);
qspi_writel(q, q->devtype_data->ahb_buf_size * 4 + addr_offset,
base + QUADSPI_SFB2AD);
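Editor's note: a worked example of the resulting chip-select windows, not part of the patch; the 0x60000000 base is only an illustrative memmap_phy for a part where the driver has to add the AMBA base:

/*
 * ahb_buf_size = 1K, addr_offset = memmap_phy = 0x60000000:
 *     SFA1AD = 0x60000400   (end of bus A, CS 0 window)
 *     SFA2AD = 0x60000800   (bus A, CS 1)
 *     SFB1AD = 0x60000c00   (bus B, CS 0)
 *     SFB2AD = 0x60001000   (bus B, CS 1)
 */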
q->selected = -1;
/* Enable the module */
qspi_writel(q, QUADSPI_MCR_RESERVED_MASK | QUADSPI_MCR_END_CFG_MASK,
base + QUADSPI_MCR);
/* clear all interrupt status */
qspi_writel(q, 0xffffffff, q->iobase + QUADSPI_FR);
/* enable the interrupt */
qspi_writel(q, QUADSPI_RSER_TFIE, q->iobase + QUADSPI_RSER);
return 0;
}
static const char *fsl_qspi_get_name(struct spi_mem *mem)
{
struct fsl_qspi *q = spi_controller_get_devdata(mem->spi->master);
struct device *dev = &mem->spi->dev;
const char *name;
/*
* In order to keep mtdparts compatible with the old MTD driver at
* mtd/spi-nor/fsl-quadspi.c, we set a custom name derived from the
* platform_device of the controller.
*/
if (of_get_available_child_count(q->dev->of_node) == 1)
return dev_name(q->dev);
name = devm_kasprintf(dev, GFP_KERNEL,
"%s-%d", dev_name(q->dev),
mem->spi->chip_select);
if (!name) {
dev_err(dev, "failed to get memory for custom flash name\n");
return ERR_PTR(-ENOMEM);
}
return name;
}
static const struct spi_controller_mem_ops fsl_qspi_mem_ops = {
.adjust_op_size = fsl_qspi_adjust_op_size,
.supports_op = fsl_qspi_supports_op,
.exec_op = fsl_qspi_exec_op,
.get_name = fsl_qspi_get_name,
};
static int fsl_qspi_probe(struct platform_device *pdev)
{
struct spi_controller *ctlr;
struct device *dev = &pdev->dev;
struct device_node *np = dev->of_node;
struct resource *res;
struct fsl_qspi *q;
int ret;
ctlr = spi_alloc_master(&pdev->dev, sizeof(*q));
if (!ctlr)
return -ENOMEM;
ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD |
SPI_TX_DUAL | SPI_TX_QUAD;
q = spi_controller_get_devdata(ctlr);
q->dev = dev;
q->devtype_data = of_device_get_match_data(dev);
if (!q->devtype_data) {
ret = -ENODEV;
goto err_put_ctrl;
}
platform_set_drvdata(pdev, q);
/* find the resources */
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "QuadSPI");
q->iobase = devm_ioremap_resource(dev, res);
if (IS_ERR(q->iobase)) {
ret = PTR_ERR(q->iobase);
goto err_put_ctrl;
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
"QuadSPI-memory");
q->ahb_addr = devm_ioremap_resource(dev, res);
if (IS_ERR(q->ahb_addr)) {
ret = PTR_ERR(q->ahb_addr);
goto err_put_ctrl;
}
q->memmap_phy = res->start;
/* find the clocks */
q->clk_en = devm_clk_get(dev, "qspi_en");
if (IS_ERR(q->clk_en)) {
ret = PTR_ERR(q->clk_en);
goto err_put_ctrl;
}
q->clk = devm_clk_get(dev, "qspi");
if (IS_ERR(q->clk)) {
ret = PTR_ERR(q->clk);
goto err_put_ctrl;
}
ret = fsl_qspi_clk_prep_enable(q);
if (ret) {
dev_err(dev, "can not enable the clock\n");
goto err_put_ctrl;
}
/* find the irq */
ret = platform_get_irq(pdev, 0);
if (ret < 0) {
dev_err(dev, "failed to get the irq: %d\n", ret);
goto err_disable_clk;
}
ret = devm_request_irq(dev, ret,
fsl_qspi_irq_handler, 0, pdev->name, q);
if (ret) {
dev_err(dev, "failed to request irq: %d\n", ret);
goto err_disable_clk;
}
mutex_init(&q->lock);
ctlr->bus_num = -1;
ctlr->num_chipselect = 4;
ctlr->mem_ops = &fsl_qspi_mem_ops;
fsl_qspi_default_setup(q);
ctlr->dev.of_node = np;
ret = spi_register_controller(ctlr);
if (ret)
goto err_destroy_mutex;
return 0;
err_destroy_mutex:
mutex_destroy(&q->lock);
err_disable_clk:
fsl_qspi_clk_disable_unprep(q);
err_put_ctrl:
spi_controller_put(ctlr);
dev_err(dev, "Freescale QuadSPI probe failed\n");
return ret;
}
static int fsl_qspi_remove(struct platform_device *pdev)
{
struct fsl_qspi *q = platform_get_drvdata(pdev);
/* disable the hardware */
qspi_writel(q, QUADSPI_MCR_MDIS_MASK, q->iobase + QUADSPI_MCR);
qspi_writel(q, 0x0, q->iobase + QUADSPI_RSER);
fsl_qspi_clk_disable_unprep(q);
mutex_destroy(&q->lock);
return 0;
}
static int fsl_qspi_suspend(struct device *dev)
{
return 0;
}
static int fsl_qspi_resume(struct device *dev)
{
struct fsl_qspi *q = dev_get_drvdata(dev);
fsl_qspi_default_setup(q);
return 0;
}
static const struct of_device_id fsl_qspi_dt_ids[] = {
{ .compatible = "fsl,vf610-qspi", .data = &vybrid_data, },
{ .compatible = "fsl,imx6sx-qspi", .data = &imx6sx_data, },
{ .compatible = "fsl,imx7d-qspi", .data = &imx7d_data, },
{ .compatible = "fsl,imx6ul-qspi", .data = &imx6ul_data, },
{ .compatible = "fsl,ls1021a-qspi", .data = &ls1021a_data, },
{ .compatible = "fsl,ls2080a-qspi", .data = &ls2080a_data, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qspi_dt_ids);
static const struct dev_pm_ops fsl_qspi_pm_ops = {
.suspend = fsl_qspi_suspend,
.resume = fsl_qspi_resume,
};
static struct platform_driver fsl_qspi_driver = {
.driver = {
.name = "fsl-quadspi",
.of_match_table = fsl_qspi_dt_ids,
.pm = &fsl_qspi_pm_ops,
},
.probe = fsl_qspi_probe,
.remove = fsl_qspi_remove,
};
module_platform_driver(fsl_qspi_driver);
MODULE_DESCRIPTION("Freescale QuadSPI Controller Driver");
MODULE_AUTHOR("Freescale Semiconductor Inc.");
MODULE_AUTHOR("Boris Brezillon <bbrezillon@kernel.org>");
MODULE_AUTHOR("Frieder Schrempf <frieder.schrempf@kontron.de>");
MODULE_AUTHOR("Yogesh Gaur <yogeshnarayan.gaur@nxp.com>");
MODULE_AUTHOR("Suresh Gupta <suresh.gupta@nxp.com>");
MODULE_LICENSE("GPL v2");

View File

@ -89,9 +89,6 @@ struct spi_geni_master {
int irq;
};
static void handle_fifo_timeout(struct spi_master *spi,
struct spi_message *msg);
static int get_spi_clk_cfg(unsigned int speed_hz,
struct spi_geni_master *mas,
unsigned int *clk_idx,
@ -122,6 +119,32 @@ static int get_spi_clk_cfg(unsigned int speed_hz,
return ret;
}
static void handle_fifo_timeout(struct spi_master *spi,
struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
unsigned long time_left, flags;
struct geni_se *se = &mas->se;
spin_lock_irqsave(&mas->lock, flags);
reinit_completion(&mas->xfer_done);
mas->cur_mcmd = CMD_CANCEL;
geni_se_cancel_m_cmd(se);
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
spin_unlock_irqrestore(&mas->lock, flags);
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
if (time_left)
return;
spin_lock_irqsave(&mas->lock, flags);
reinit_completion(&mas->xfer_done);
geni_se_abort_m_cmd(se);
spin_unlock_irqrestore(&mas->lock, flags);
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
if (!time_left)
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
}
static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
{
struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
@ -233,7 +256,6 @@ static int spi_geni_prepare_message(struct spi_master *spi,
struct geni_se *se = &mas->se;
geni_se_select_mode(se, GENI_SE_FIFO);
reinit_completion(&mas->xfer_done);
ret = setup_fifo_params(spi_msg->spi, spi);
if (ret)
dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@ -357,32 +379,6 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
}
static void handle_fifo_timeout(struct spi_master *spi,
struct spi_message *msg)
{
struct spi_geni_master *mas = spi_master_get_devdata(spi);
unsigned long time_left, flags;
struct geni_se *se = &mas->se;
spin_lock_irqsave(&mas->lock, flags);
reinit_completion(&mas->xfer_done);
mas->cur_mcmd = CMD_CANCEL;
geni_se_cancel_m_cmd(se);
writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
spin_unlock_irqrestore(&mas->lock, flags);
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
if (time_left)
return;
spin_lock_irqsave(&mas->lock, flags);
reinit_completion(&mas->xfer_done);
geni_se_abort_m_cmd(se);
spin_unlock_irqrestore(&mas->lock, flags);
time_left = wait_for_completion_timeout(&mas->xfer_done, HZ);
if (!time_left)
dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
}
static int spi_geni_transfer_one(struct spi_master *spi,
struct spi_device *slv,
struct spi_transfer *xfer)

View File

@ -552,6 +552,75 @@ void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);
static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;
spi_mem_dirmap_destroy(desc);
}
/**
* devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
* it to a device
* @dev: device the dirmap desc will be attached to
* @mem: SPI mem device this direct mapping should be created for
* @info: direct mapping information
*
* devm_ variant of the spi_mem_dirmap_create() function. See
* spi_mem_dirmap_create() for more details.
*
* Return: a valid pointer in case of success, and ERR_PTR() otherwise.
*/
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
const struct spi_mem_dirmap_info *info)
{
struct spi_mem_dirmap_desc **ptr, *desc;
ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
GFP_KERNEL);
if (!ptr)
return ERR_PTR(-ENOMEM);
desc = spi_mem_dirmap_create(mem, info);
if (IS_ERR(desc)) {
devres_free(ptr);
} else {
*ptr = desc;
devres_add(dev, ptr);
}
return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);
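Editor's note: a minimal usage sketch for the devm_ helper above, not part of the patch. The function name, opcode, address width and window size are illustrative only; it assumes the usual linux/err.h, linux/sizes.h and linux/spi/spi-mem.h includes.

/* Hypothetical caller: map a 16 MiB window for 1-1-4 fast reads. */
static int example_flash_dirmap_setup(struct device *dev, struct spi_mem *mem,
                                      struct spi_mem_dirmap_desc **out)
{
        struct spi_mem_dirmap_info info = {
                .op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
                                      SPI_MEM_OP_ADDR(3, 0, 1),
                                      SPI_MEM_OP_DUMMY(1, 1),
                                      SPI_MEM_OP_DATA_IN(0, NULL, 4)),
                .offset = 0,
                .length = SZ_16M,
        };
        struct spi_mem_dirmap_desc *desc;

        desc = devm_spi_mem_dirmap_create(dev, mem, &info);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        *out = desc;
        return 0;
}

Reads then go through spi_mem_dirmap_read(desc, offs, len, buf), and the descriptor is torn down automatically when dev is unbound, which is the point of the devres wrapper.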
static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
struct spi_mem_dirmap_desc **ptr = res;
if (WARN_ON(!ptr || !*ptr))
return 0;
return *ptr == data;
}
/**
* devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
* to a device
* @dev: device the dirmap desc is attached to
* @desc: the direct mapping descriptor to destroy
*
* devm_ variant of the spi_mem_dirmap_destroy() function. See
* spi_mem_dirmap_destroy() for more details.
*/
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc)
{
devres_release(dev, devm_spi_mem_dirmap_release,
devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);
/**
* spi_mem_dirmap_read() - Read data through a direct mapping
* @desc: direct mapping descriptor

View File

@ -39,6 +39,7 @@
#include <linux/stmp_device.h>
#include <linux/spi/spi.h>
#include <linux/spi/mxs-spi.h>
#include <trace/events/spi.h>
#define DRIVER_NAME "mxs-spi"
@ -374,6 +375,8 @@ static int mxs_spi_transfer_one(struct spi_master *master,
list_for_each_entry(t, &m->transfers, transfer_list) {
trace_spi_transfer_start(m, t);
status = mxs_spi_setup_transfer(m->spi, t);
if (status)
break;
@ -419,6 +422,8 @@ static int mxs_spi_transfer_one(struct spi_master *master,
flag);
}
trace_spi_transfer_stop(m, t);
if (status) {
stmp_reset_block(ssp->base);
break;

1106 drivers/spi/spi-nxp-fspi.c Normal file

File diff suppressed because it is too large

View File

@ -253,6 +253,7 @@
#define STATE_RUNNING ((void *) 1)
#define STATE_DONE ((void *) 2)
#define STATE_ERROR ((void *) -1)
#define STATE_TIMEOUT ((void *) -2)
/*
* SSP State - Whether Enabled or Disabled
@ -1484,6 +1485,30 @@ static void do_interrupt_dma_transfer(struct pl022 *pl022)
writew(irqflags, SSP_IMSC(pl022->virtbase));
}
static void print_current_status(struct pl022 *pl022)
{
u32 read_cr0;
u16 read_cr1, read_dmacr, read_sr;
if (pl022->vendor->extended_cr)
read_cr0 = readl(SSP_CR0(pl022->virtbase));
else
read_cr0 = readw(SSP_CR0(pl022->virtbase));
read_cr1 = readw(SSP_CR1(pl022->virtbase));
read_dmacr = readw(SSP_DMACR(pl022->virtbase));
read_sr = readw(SSP_SR(pl022->virtbase));
dev_warn(&pl022->adev->dev, "spi-pl022 CR0: %x\n", read_cr0);
dev_warn(&pl022->adev->dev, "spi-pl022 CR1: %x\n", read_cr1);
dev_warn(&pl022->adev->dev, "spi-pl022 DMACR: %x\n", read_dmacr);
dev_warn(&pl022->adev->dev, "spi-pl022 SR: %x\n", read_sr);
dev_warn(&pl022->adev->dev,
"spi-pl022 exp_fifo_level/fifodepth: %u/%d\n",
pl022->exp_fifo_level,
pl022->vendor->fifodepth);
}
static void do_polling_transfer(struct pl022 *pl022)
{
struct spi_message *message = NULL;
@ -1535,7 +1560,8 @@ static void do_polling_transfer(struct pl022 *pl022)
if (time_after(time, timeout)) {
dev_warn(&pl022->adev->dev,
"%s: timeout!\n", __func__);
message->state = STATE_ERROR;
message->state = STATE_TIMEOUT;
print_current_status(pl022);
goto out;
}
cpu_relax();
@ -1553,6 +1579,8 @@ static void do_polling_transfer(struct pl022 *pl022)
/* Handle end of message */
if (message->state == STATE_DONE)
message->status = 0;
else if (message->state == STATE_TIMEOUT)
message->status = -EAGAIN;
else
message->status = -EIO;

View File

@ -23,7 +23,7 @@
static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
bool error)
{
struct spi_message *msg = drv_data->master->cur_msg;
struct spi_message *msg = drv_data->controller->cur_msg;
/*
* It is possible that one CPU is handling ROR interrupt and other
@ -59,7 +59,7 @@ static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
msg->status = -EIO;
}
spi_finalize_current_transfer(drv_data->master);
spi_finalize_current_transfer(drv_data->controller);
}
}
@ -74,7 +74,7 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
struct spi_transfer *xfer)
{
struct chip_data *chip =
spi_get_ctldata(drv_data->master->cur_msg->spi);
spi_get_ctldata(drv_data->controller->cur_msg->spi);
enum dma_slave_buswidth width;
struct dma_slave_config cfg;
struct dma_chan *chan;
@ -102,14 +102,14 @@ pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
cfg.dst_maxburst = chip->dma_burst_size;
sgt = &xfer->tx_sg;
chan = drv_data->master->dma_tx;
chan = drv_data->controller->dma_tx;
} else {
cfg.src_addr = drv_data->ssdr_physical;
cfg.src_addr_width = width;
cfg.src_maxburst = chip->dma_burst_size;
sgt = &xfer->rx_sg;
chan = drv_data->master->dma_rx;
chan = drv_data->controller->dma_rx;
}
ret = dmaengine_slave_config(chan, &cfg);
@ -130,8 +130,8 @@ irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
if (status & SSSR_ROR) {
dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
dmaengine_terminate_async(drv_data->master->dma_rx);
dmaengine_terminate_async(drv_data->master->dma_tx);
dmaengine_terminate_async(drv_data->controller->dma_rx);
dmaengine_terminate_async(drv_data->controller->dma_tx);
pxa2xx_spi_dma_transfer_complete(drv_data, true);
return IRQ_HANDLED;
@ -171,15 +171,15 @@ int pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
return 0;
err_rx:
dmaengine_terminate_async(drv_data->master->dma_tx);
dmaengine_terminate_async(drv_data->controller->dma_tx);
err_tx:
return err;
}
void pxa2xx_spi_dma_start(struct driver_data *drv_data)
{
dma_async_issue_pending(drv_data->master->dma_rx);
dma_async_issue_pending(drv_data->master->dma_tx);
dma_async_issue_pending(drv_data->controller->dma_rx);
dma_async_issue_pending(drv_data->controller->dma_tx);
atomic_set(&drv_data->dma_running, 1);
}
@ -187,30 +187,30 @@ void pxa2xx_spi_dma_start(struct driver_data *drv_data)
void pxa2xx_spi_dma_stop(struct driver_data *drv_data)
{
atomic_set(&drv_data->dma_running, 0);
dmaengine_terminate_sync(drv_data->master->dma_rx);
dmaengine_terminate_sync(drv_data->master->dma_tx);
dmaengine_terminate_sync(drv_data->controller->dma_rx);
dmaengine_terminate_sync(drv_data->controller->dma_tx);
}
int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
{
struct pxa2xx_spi_master *pdata = drv_data->master_info;
struct pxa2xx_spi_controller *pdata = drv_data->controller_info;
struct device *dev = &drv_data->pdev->dev;
struct spi_controller *master = drv_data->master;
struct spi_controller *controller = drv_data->controller;
dma_cap_mask_t mask;
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
master->dma_tx = dma_request_slave_channel_compat(mask,
controller->dma_tx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->tx_param, dev, "tx");
if (!master->dma_tx)
if (!controller->dma_tx)
return -ENODEV;
master->dma_rx = dma_request_slave_channel_compat(mask,
controller->dma_rx = dma_request_slave_channel_compat(mask,
pdata->dma_filter, pdata->rx_param, dev, "rx");
if (!master->dma_rx) {
dma_release_channel(master->dma_tx);
master->dma_tx = NULL;
if (!controller->dma_rx) {
dma_release_channel(controller->dma_tx);
controller->dma_tx = NULL;
return -ENODEV;
}
@ -219,17 +219,17 @@ int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
void pxa2xx_spi_dma_release(struct driver_data *drv_data)
{
struct spi_controller *master = drv_data->master;
struct spi_controller *controller = drv_data->controller;
if (master->dma_rx) {
dmaengine_terminate_sync(master->dma_rx);
dma_release_channel(master->dma_rx);
master->dma_rx = NULL;
if (controller->dma_rx) {
dmaengine_terminate_sync(controller->dma_rx);
dma_release_channel(controller->dma_rx);
controller->dma_rx = NULL;
}
if (master->dma_tx) {
dmaengine_terminate_sync(master->dma_tx);
dma_release_channel(master->dma_tx);
master->dma_tx = NULL;
if (controller->dma_tx) {
dmaengine_terminate_sync(controller->dma_tx);
dma_release_channel(controller->dma_tx);
controller->dma_tx = NULL;
}
}

View File

@ -197,7 +197,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
struct platform_device_info pi;
int ret;
struct platform_device *pdev;
struct pxa2xx_spi_master spi_pdata;
struct pxa2xx_spi_controller spi_pdata;
struct ssp_device *ssp;
struct pxa_spi_info *c;
char buf[40];
@ -265,7 +265,7 @@ static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
{
struct platform_device *pdev = pci_get_drvdata(dev);
struct pxa2xx_spi_master *spi_pdata;
struct pxa2xx_spi_controller *spi_pdata;
spi_pdata = dev_get_platdata(&pdev->dev);

View File

@ -328,7 +328,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
/* Enable multiblock DMA transfers */
if (drv_data->master_info->enable_dma) {
if (drv_data->controller_info->enable_dma) {
__lpss_ssp_write_priv(drv_data, config->reg_ssp, 1);
if (config->reg_general >= 0) {
@ -368,7 +368,7 @@ static void lpss_ssp_select_cs(struct spi_device *spi,
__lpss_ssp_write_priv(drv_data,
config->reg_cs_ctrl, value);
ndelay(1000000000 /
(drv_data->master->max_speed_hz / 2));
(drv_data->controller->max_speed_hz / 2));
}
}
@ -567,7 +567,7 @@ static int u32_reader(struct driver_data *drv_data)
static void reset_sccr1(struct driver_data *drv_data)
{
struct chip_data *chip =
spi_get_ctldata(drv_data->master->cur_msg->spi);
spi_get_ctldata(drv_data->controller->cur_msg->spi);
u32 sccr1_reg;
sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
@ -599,8 +599,8 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg)
dev_err(&drv_data->pdev->dev, "%s\n", msg);
drv_data->master->cur_msg->status = -EIO;
spi_finalize_current_transfer(drv_data->master);
drv_data->controller->cur_msg->status = -EIO;
spi_finalize_current_transfer(drv_data->controller);
}
static void int_transfer_complete(struct driver_data *drv_data)
@ -611,7 +611,7 @@ static void int_transfer_complete(struct driver_data *drv_data)
if (!pxa25x_ssp_comp(drv_data))
pxa2xx_spi_write(drv_data, SSTO, 0);
spi_finalize_current_transfer(drv_data->master);
spi_finalize_current_transfer(drv_data->controller);
}
static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
@ -747,7 +747,7 @@ static irqreturn_t ssp_int(int irq, void *dev_id)
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg & ~drv_data->int_cr1);
pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
if (!drv_data->master->cur_msg) {
if (!drv_data->controller->cur_msg) {
handle_bad_msg(drv_data);
/* Never fail */
return IRQ_HANDLED;
@ -879,7 +879,7 @@ static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
{
unsigned long ssp_clk = drv_data->master->max_speed_hz;
unsigned long ssp_clk = drv_data->controller->max_speed_hz;
const struct ssp_device *ssp = drv_data->ssp;
rate = min_t(int, ssp_clk, rate);
@ -894,7 +894,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
int rate)
{
struct chip_data *chip =
spi_get_ctldata(drv_data->master->cur_msg->spi);
spi_get_ctldata(drv_data->controller->cur_msg->spi);
unsigned int clk_div;
switch (drv_data->ssp_type) {
@ -908,7 +908,7 @@ static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
return clk_div << 8;
}
static bool pxa2xx_spi_can_dma(struct spi_controller *master,
static bool pxa2xx_spi_can_dma(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *xfer)
{
@ -919,12 +919,12 @@ static bool pxa2xx_spi_can_dma(struct spi_controller *master,
xfer->len >= chip->dma_burst_size;
}
static int pxa2xx_spi_transfer_one(struct spi_controller *master,
static int pxa2xx_spi_transfer_one(struct spi_controller *controller,
struct spi_device *spi,
struct spi_transfer *transfer)
{
struct driver_data *drv_data = spi_controller_get_devdata(master);
struct spi_message *message = master->cur_msg;
struct driver_data *drv_data = spi_controller_get_devdata(controller);
struct spi_message *message = controller->cur_msg;
struct chip_data *chip = spi_get_ctldata(message->spi);
u32 dma_thresh = chip->dma_threshold;
u32 dma_burst = chip->dma_burst_size;
@ -1006,9 +1006,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
"DMA burst size reduced to match bits_per_word\n");
}
dma_mapped = master->can_dma &&
master->can_dma(master, message->spi, transfer) &&
master->cur_msg_mapped;
dma_mapped = controller->can_dma &&
controller->can_dma(controller, message->spi, transfer) &&
controller->cur_msg_mapped;
if (dma_mapped) {
/* Ensure we have the correct interrupt handler */
@ -1036,12 +1036,12 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
if (!pxa25x_ssp_comp(drv_data))
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
master->max_speed_hz
controller->max_speed_hz
/ (1 + ((cr0 & SSCR0_SCR(0xfff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
else
dev_dbg(&message->spi->dev, "%u Hz actual, %s\n",
master->max_speed_hz / 2
controller->max_speed_hz / 2
/ (1 + ((cr0 & SSCR0_SCR(0x0ff)) >> 8)),
dma_mapped ? "DMA" : "PIO");
@ -1092,7 +1092,7 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
}
}
if (spi_controller_is_slave(master)) {
if (spi_controller_is_slave(controller)) {
while (drv_data->write(drv_data))
;
if (drv_data->gpiod_ready) {
@ -1111,9 +1111,9 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *master,
return 1;
}
static int pxa2xx_spi_slave_abort(struct spi_master *master)
static int pxa2xx_spi_slave_abort(struct spi_controller *controller)
{
struct driver_data *drv_data = spi_controller_get_devdata(master);
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Stop and reset SSP */
write_SSSR_CS(drv_data, drv_data->clear_sr);
@ -1126,16 +1126,16 @@ static int pxa2xx_spi_slave_abort(struct spi_master *master)
dev_dbg(&drv_data->pdev->dev, "transfer aborted\n");
drv_data->master->cur_msg->status = -EINTR;
spi_finalize_current_transfer(drv_data->master);
drv_data->controller->cur_msg->status = -EINTR;
spi_finalize_current_transfer(drv_data->controller);
return 0;
}
static void pxa2xx_spi_handle_err(struct spi_controller *master,
static void pxa2xx_spi_handle_err(struct spi_controller *controller,
struct spi_message *msg)
{
struct driver_data *drv_data = spi_controller_get_devdata(master);
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP */
pxa2xx_spi_write(drv_data, SSCR0,
@ -1159,9 +1159,9 @@ static void pxa2xx_spi_handle_err(struct spi_controller *master,
pxa2xx_spi_dma_stop(drv_data);
}
static int pxa2xx_spi_unprepare_transfer(struct spi_controller *master)
static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller)
{
struct driver_data *drv_data = spi_controller_get_devdata(master);
struct driver_data *drv_data = spi_controller_get_devdata(controller);
/* Disable the SSP now */
pxa2xx_spi_write(drv_data, SSCR0,
@ -1260,7 +1260,7 @@ static int setup(struct spi_device *spi)
break;
default:
tx_hi_thres = 0;
if (spi_controller_is_slave(drv_data->master)) {
if (spi_controller_is_slave(drv_data->controller)) {
tx_thres = 1;
rx_thres = 2;
} else {
@ -1287,7 +1287,7 @@ static int setup(struct spi_device *spi)
chip->frm = spi->chip_select;
}
chip->enable_dma = drv_data->master_info->enable_dma;
chip->enable_dma = drv_data->controller_info->enable_dma;
chip->timeout = TIMOUT_DFLT;
}
@ -1310,7 +1310,7 @@ static int setup(struct spi_device *spi)
if (chip_info->enable_loopback)
chip->cr1 = SSCR1_LBM;
}
if (spi_controller_is_slave(drv_data->master)) {
if (spi_controller_is_slave(drv_data->controller)) {
chip->cr1 |= SSCR1_SCFR;
chip->cr1 |= SSCR1_SCLKDIR;
chip->cr1 |= SSCR1_SFRMDIR;
@ -1497,10 +1497,10 @@ static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
#endif /* CONFIG_PCI */
static struct pxa2xx_spi_master *
static struct pxa2xx_spi_controller *
pxa2xx_spi_init_pdata(struct platform_device *pdev)
{
struct pxa2xx_spi_master *pdata;
struct pxa2xx_spi_controller *pdata;
struct acpi_device *adev;
struct ssp_device *ssp;
struct resource *res;
@ -1568,10 +1568,10 @@ pxa2xx_spi_init_pdata(struct platform_device *pdev)
return pdata;
}
static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
static int pxa2xx_spi_fw_translate_cs(struct spi_controller *controller,
unsigned int cs)
{
struct driver_data *drv_data = spi_controller_get_devdata(master);
struct driver_data *drv_data = spi_controller_get_devdata(controller);
if (has_acpi_companion(&drv_data->pdev->dev)) {
switch (drv_data->ssp_type) {
@ -1595,8 +1595,8 @@ static int pxa2xx_spi_fw_translate_cs(struct spi_controller *master,
static int pxa2xx_spi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct pxa2xx_spi_master *platform_info;
struct spi_controller *master;
struct pxa2xx_spi_controller *platform_info;
struct spi_controller *controller;
struct driver_data *drv_data;
struct ssp_device *ssp;
const struct lpss_config *config;
@ -1622,37 +1622,37 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
if (platform_info->is_slave)
master = spi_alloc_slave(dev, sizeof(struct driver_data));
controller = spi_alloc_slave(dev, sizeof(struct driver_data));
else
master = spi_alloc_master(dev, sizeof(struct driver_data));
controller = spi_alloc_master(dev, sizeof(struct driver_data));
if (!master) {
dev_err(&pdev->dev, "cannot alloc spi_master\n");
if (!controller) {
dev_err(&pdev->dev, "cannot alloc spi_controller\n");
pxa_ssp_free(ssp);
return -ENOMEM;
}
drv_data = spi_controller_get_devdata(master);
drv_data->master = master;
drv_data->master_info = platform_info;
drv_data = spi_controller_get_devdata(controller);
drv_data->controller = controller;
drv_data->controller_info = platform_info;
drv_data->pdev = pdev;
drv_data->ssp = ssp;
master->dev.of_node = pdev->dev.of_node;
controller->dev.of_node = pdev->dev.of_node;
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
master->bus_num = ssp->port_id;
master->dma_alignment = DMA_ALIGNMENT;
master->cleanup = cleanup;
master->setup = setup;
master->set_cs = pxa2xx_spi_set_cs;
master->transfer_one = pxa2xx_spi_transfer_one;
master->slave_abort = pxa2xx_spi_slave_abort;
master->handle_err = pxa2xx_spi_handle_err;
master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
master->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
master->auto_runtime_pm = true;
master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
controller->bus_num = ssp->port_id;
controller->dma_alignment = DMA_ALIGNMENT;
controller->cleanup = cleanup;
controller->setup = setup;
controller->set_cs = pxa2xx_spi_set_cs;
controller->transfer_one = pxa2xx_spi_transfer_one;
controller->slave_abort = pxa2xx_spi_slave_abort;
controller->handle_err = pxa2xx_spi_handle_err;
controller->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
controller->fw_translate_cs = pxa2xx_spi_fw_translate_cs;
controller->auto_runtime_pm = true;
controller->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX;
drv_data->ssp_type = ssp->type;
@ -1661,10 +1661,10 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (pxa25x_ssp_comp(drv_data)) {
switch (drv_data->ssp_type) {
case QUARK_X1000_SSP:
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
break;
default:
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
break;
}
@ -1673,7 +1673,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data->clear_sr = SSSR_ROR;
drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
} else {
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
drv_data->dma_cr1 = DEFAULT_DMA_CR1;
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
@ -1685,7 +1685,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
drv_data);
if (status < 0) {
dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
goto out_error_master_alloc;
goto out_error_controller_alloc;
}
/* Setup DMA if requested */
@ -1695,8 +1695,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
dev_dbg(dev, "no DMA channels available, using PIO\n");
platform_info->enable_dma = false;
} else {
master->can_dma = pxa2xx_spi_can_dma;
master->max_dma_len = MAX_DMA_LEN;
controller->can_dma = pxa2xx_spi_can_dma;
controller->max_dma_len = MAX_DMA_LEN;
}
}
@ -1705,7 +1705,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
if (status)
goto out_error_dma_irq_alloc;
master->max_speed_hz = clk_get_rate(ssp->clk);
controller->max_speed_hz = clk_get_rate(ssp->clk);
/* Load default SSP configuration */
pxa2xx_spi_write(drv_data, SSCR0, 0);
@ -1728,7 +1728,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
break;
default:
if (spi_controller_is_slave(master)) {
if (spi_controller_is_slave(controller)) {
tmp = SSCR1_SCFR |
SSCR1_SCLKDIR |
SSCR1_SFRMDIR |
@ -1741,7 +1741,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
}
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_Motorola | SSCR0_DataSize(8);
if (!spi_controller_is_slave(master))
if (!spi_controller_is_slave(controller))
tmp |= SSCR0_SCR(2);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
break;
@ -1766,24 +1766,24 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
platform_info->num_chipselect = config->cs_num;
}
}
master->num_chipselect = platform_info->num_chipselect;
controller->num_chipselect = platform_info->num_chipselect;
count = gpiod_count(&pdev->dev, "cs");
if (count > 0) {
int i;
master->num_chipselect = max_t(int, count,
master->num_chipselect);
controller->num_chipselect = max_t(int, count,
controller->num_chipselect);
drv_data->cs_gpiods = devm_kcalloc(&pdev->dev,
master->num_chipselect, sizeof(struct gpio_desc *),
controller->num_chipselect, sizeof(struct gpio_desc *),
GFP_KERNEL);
if (!drv_data->cs_gpiods) {
status = -ENOMEM;
goto out_error_clock_enabled;
}
for (i = 0; i < master->num_chipselect; i++) {
for (i = 0; i < controller->num_chipselect; i++) {
struct gpio_desc *gpiod;
gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS);
@ -1816,9 +1816,9 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
/* Register with the SPI framework */
platform_set_drvdata(pdev, drv_data);
status = devm_spi_register_controller(&pdev->dev, master);
status = devm_spi_register_controller(&pdev->dev, controller);
if (status != 0) {
dev_err(&pdev->dev, "problem registering spi master\n");
dev_err(&pdev->dev, "problem registering spi controller\n");
goto out_error_clock_enabled;
}
@ -1833,8 +1833,8 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
pxa2xx_spi_dma_release(drv_data);
free_irq(ssp->irq, drv_data);
out_error_master_alloc:
spi_controller_put(master);
out_error_controller_alloc:
spi_controller_put(controller);
pxa_ssp_free(ssp);
return status;
}
@ -1855,7 +1855,7 @@ static int pxa2xx_spi_remove(struct platform_device *pdev)
clk_disable_unprepare(ssp->clk);
/* Release DMA */
if (drv_data->master_info->enable_dma)
if (drv_data->controller_info->enable_dma)
pxa2xx_spi_dma_release(drv_data);
pm_runtime_put_noidle(&pdev->dev);
@ -1877,7 +1877,7 @@ static int pxa2xx_spi_suspend(struct device *dev)
struct ssp_device *ssp = drv_data->ssp;
int status;
status = spi_controller_suspend(drv_data->master);
status = spi_controller_suspend(drv_data->controller);
if (status != 0)
return status;
pxa2xx_spi_write(drv_data, SSCR0, 0);
@ -1902,7 +1902,7 @@ static int pxa2xx_spi_resume(struct device *dev)
}
/* Start the queue running */
return spi_controller_resume(drv_data->master);
return spi_controller_resume(drv_data->controller);
}
#endif

View File

@ -31,10 +31,10 @@ struct driver_data {
/* SPI framework hookup */
enum pxa_ssp_type ssp_type;
struct spi_controller *master;
struct spi_controller *controller;
/* PXA hookup */
struct pxa2xx_spi_master *master_info;
struct pxa2xx_spi_controller *controller_info;
/* SSP register addresses */
void __iomem *ioaddr;

View File

@ -180,7 +180,7 @@
struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
struct spi_master *master;
struct spi_controller *ctlr;
wait_queue_head_t wait;
struct clk *clk;
u16 spcmd;
@ -237,8 +237,8 @@ static u16 rspi_read_data(const struct rspi_data *rspi)
/* optional functions */
struct spi_ops {
int (*set_config_register)(struct rspi_data *rspi, int access_size);
int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *xfer);
int (*transfer_one)(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer);
u16 mode_bits;
u16 flags;
u16 fifo_size;
@ -466,7 +466,7 @@ static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
int error = rspi_wait_for_tx_empty(rspi);
if (error < 0) {
dev_err(&rspi->master->dev, "transmit timeout\n");
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return error;
}
rspi_write_data(rspi, data);
@ -480,7 +480,7 @@ static int rspi_data_in(struct rspi_data *rspi)
error = rspi_wait_for_rx_full(rspi);
if (error < 0) {
dev_err(&rspi->master->dev, "receive timeout\n");
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return error;
}
data = rspi_read_data(rspi);
@ -526,8 +526,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
desc_rx = dmaengine_prep_slave_sg(rspi->ctlr->dma_rx, rx->sgl,
rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx) {
ret = -EAGAIN;
@ -546,8 +546,8 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
}
if (tx) {
desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
tx->sgl, tx->nents, DMA_MEM_TO_DEV,
desc_tx = dmaengine_prep_slave_sg(rspi->ctlr->dma_tx, tx->sgl,
tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
ret = -EAGAIN;
@ -584,9 +584,9 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
/* Now start DMA */
if (rx)
dma_async_issue_pending(rspi->master->dma_rx);
dma_async_issue_pending(rspi->ctlr->dma_rx);
if (tx)
dma_async_issue_pending(rspi->master->dma_tx);
dma_async_issue_pending(rspi->ctlr->dma_tx);
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
@ -594,13 +594,13 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
ret = 0;
} else {
if (!ret) {
dev_err(&rspi->master->dev, "DMA timeout\n");
dev_err(&rspi->ctlr->dev, "DMA timeout\n");
ret = -ETIMEDOUT;
}
if (tx)
dmaengine_terminate_all(rspi->master->dma_tx);
dmaengine_terminate_all(rspi->ctlr->dma_tx);
if (rx)
dmaengine_terminate_all(rspi->master->dma_rx);
dmaengine_terminate_all(rspi->ctlr->dma_rx);
}
rspi_disable_irq(rspi, irq_mask);
@ -614,12 +614,12 @@ static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
no_dma_tx:
if (rx)
dmaengine_terminate_all(rspi->master->dma_rx);
dmaengine_terminate_all(rspi->ctlr->dma_rx);
no_dma_rx:
if (ret == -EAGAIN) {
pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
dev_driver_string(&rspi->master->dev),
dev_name(&rspi->master->dev));
dev_driver_string(&rspi->ctlr->dev),
dev_name(&rspi->ctlr->dev));
}
return ret;
}
@ -660,10 +660,10 @@ static bool __rspi_can_dma(const struct rspi_data *rspi,
return xfer->len > rspi->ops->fifo_size;
}
static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
static bool rspi_can_dma(struct spi_controller *ctlr, struct spi_device *spi,
struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_master_get_devdata(master);
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
return __rspi_can_dma(rspi, xfer);
}
@ -671,7 +671,7 @@ static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
struct spi_transfer *xfer)
{
if (!rspi->master->can_dma || !__rspi_can_dma(rspi, xfer))
if (!rspi->ctlr->can_dma || !__rspi_can_dma(rspi, xfer))
return -EAGAIN;
/* rx_buf can be NULL on RSPI on SH in TX-only Mode */
@ -698,10 +698,10 @@ static int rspi_common_transfer(struct rspi_data *rspi,
return 0;
}
static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *xfer)
static int rspi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_master_get_devdata(master);
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
u8 spcr;
spcr = rspi_read8(rspi, RSPI_SPCR);
@ -716,11 +716,11 @@ static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
return rspi_common_transfer(rspi, xfer);
}
static int rspi_rz_transfer_one(struct spi_master *master,
static int rspi_rz_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_master_get_devdata(master);
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
rspi_rz_receive_init(rspi);
@ -739,7 +739,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
if (n == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
dev_err(&rspi->master->dev, "transmit timeout\n");
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < n; i++)
@ -747,7 +747,7 @@ static int qspi_trigger_transfer_out_in(struct rspi_data *rspi, const u8 *tx,
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
dev_err(&rspi->master->dev, "receive timeout\n");
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < n; i++)
@ -785,7 +785,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
unsigned int i, len;
int ret;
if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
if (ret != -EAGAIN)
return ret;
@ -796,7 +796,7 @@ static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
if (len == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_tx_empty(rspi);
if (ret < 0) {
dev_err(&rspi->master->dev, "transmit timeout\n");
dev_err(&rspi->ctlr->dev, "transmit timeout\n");
return ret;
}
for (i = 0; i < len; i++)
@ -822,7 +822,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
unsigned int i, len;
int ret;
if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
if (rspi->ctlr->can_dma && __rspi_can_dma(rspi, xfer)) {
int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
if (ret != -EAGAIN)
return ret;
@ -833,7 +833,7 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
if (len == QSPI_BUFFER_SIZE) {
ret = rspi_wait_for_rx_full(rspi);
if (ret < 0) {
dev_err(&rspi->master->dev, "receive timeout\n");
dev_err(&rspi->ctlr->dev, "receive timeout\n");
return ret;
}
for (i = 0; i < len; i++)
@ -849,10 +849,10 @@ static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
return 0;
}
static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
struct spi_transfer *xfer)
static int qspi_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi, struct spi_transfer *xfer)
{
struct rspi_data *rspi = spi_master_get_devdata(master);
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
if (spi->mode & SPI_LOOP) {
return qspi_transfer_out_in(rspi, xfer);
@ -870,7 +870,7 @@ static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
static int rspi_setup(struct spi_device *spi)
{
struct rspi_data *rspi = spi_master_get_devdata(spi->master);
struct rspi_data *rspi = spi_controller_get_devdata(spi->controller);
rspi->max_speed_hz = spi->max_speed_hz;
@ -955,10 +955,10 @@ static int qspi_setup_sequencer(struct rspi_data *rspi,
return 0;
}
static int rspi_prepare_message(struct spi_master *master,
static int rspi_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct rspi_data *rspi = spi_master_get_devdata(master);
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
int ret;
if (msg->spi->mode &
@ -974,10 +974,10 @@ static int rspi_prepare_message(struct spi_master *master,
return 0;
}
static int rspi_unprepare_message(struct spi_master *master,
static int rspi_unprepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct rspi_data *rspi = spi_master_get_devdata(master);
struct rspi_data *rspi = spi_controller_get_devdata(ctlr);
/* Disable SPI function */
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
@ -1081,7 +1081,7 @@ static struct dma_chan *rspi_request_dma_chan(struct device *dev,
return chan;
}
static int rspi_request_dma(struct device *dev, struct spi_master *master,
static int rspi_request_dma(struct device *dev, struct spi_controller *ctlr,
const struct resource *res)
{
const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
@ -1099,37 +1099,37 @@ static int rspi_request_dma(struct device *dev, struct spi_master *master,
return 0;
}
master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
res->start + RSPI_SPDR);
if (!master->dma_tx)
ctlr->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
res->start + RSPI_SPDR);
if (!ctlr->dma_tx)
return -ENODEV;
master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
res->start + RSPI_SPDR);
if (!master->dma_rx) {
dma_release_channel(master->dma_tx);
master->dma_tx = NULL;
ctlr->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
res->start + RSPI_SPDR);
if (!ctlr->dma_rx) {
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
return -ENODEV;
}
master->can_dma = rspi_can_dma;
ctlr->can_dma = rspi_can_dma;
dev_info(dev, "DMA available");
return 0;
}
static void rspi_release_dma(struct spi_master *master)
static void rspi_release_dma(struct spi_controller *ctlr)
{
if (master->dma_tx)
dma_release_channel(master->dma_tx);
if (master->dma_rx)
dma_release_channel(master->dma_rx);
if (ctlr->dma_tx)
dma_release_channel(ctlr->dma_tx);
if (ctlr->dma_rx)
dma_release_channel(ctlr->dma_rx);
}
static int rspi_remove(struct platform_device *pdev)
{
struct rspi_data *rspi = platform_get_drvdata(pdev);
rspi_release_dma(rspi->master);
rspi_release_dma(rspi->ctlr);
pm_runtime_disable(&pdev->dev);
return 0;
@ -1139,7 +1139,7 @@ static const struct spi_ops rspi_ops = {
.set_config_register = rspi_set_config_register,
.transfer_one = rspi_transfer_one,
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_MASTER_MUST_TX,
.flags = SPI_CONTROLLER_MUST_TX,
.fifo_size = 8,
};
@ -1147,7 +1147,7 @@ static const struct spi_ops rspi_rz_ops = {
.set_config_register = rspi_rz_set_config_register,
.transfer_one = rspi_rz_transfer_one,
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
.flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 8, /* 8 for TX, 32 for RX */
};
@ -1157,7 +1157,7 @@ static const struct spi_ops qspi_ops = {
.mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
SPI_TX_DUAL | SPI_TX_QUAD |
SPI_RX_DUAL | SPI_RX_QUAD,
.flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
.flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX,
.fifo_size = 32,
};
@ -1174,7 +1174,7 @@ static const struct of_device_id rspi_of_match[] = {
MODULE_DEVICE_TABLE(of, rspi_of_match);
static int rspi_parse_dt(struct device *dev, struct spi_master *master)
static int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
u32 num_cs;
int error;
@ -1186,12 +1186,12 @@ static int rspi_parse_dt(struct device *dev, struct spi_master *master)
return error;
}
master->num_chipselect = num_cs;
ctlr->num_chipselect = num_cs;
return 0;
}
#else
#define rspi_of_match NULL
static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
static inline int rspi_parse_dt(struct device *dev, struct spi_controller *ctlr)
{
return -EINVAL;
}
@ -1212,28 +1212,28 @@ static int rspi_request_irq(struct device *dev, unsigned int irq,
static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
struct spi_controller *ctlr;
struct rspi_data *rspi;
int ret;
const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
if (master == NULL)
ctlr = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
if (ctlr == NULL)
return -ENOMEM;
ops = of_device_get_match_data(&pdev->dev);
if (ops) {
ret = rspi_parse_dt(&pdev->dev, master);
ret = rspi_parse_dt(&pdev->dev, ctlr);
if (ret)
goto error1;
} else {
ops = (struct spi_ops *)pdev->id_entry->driver_data;
rspi_pd = dev_get_platdata(&pdev->dev);
if (rspi_pd && rspi_pd->num_chipselect)
master->num_chipselect = rspi_pd->num_chipselect;
ctlr->num_chipselect = rspi_pd->num_chipselect;
else
master->num_chipselect = 2; /* default */
ctlr->num_chipselect = 2; /* default */
}
/* ops parameter check */
@ -1243,10 +1243,10 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
rspi = spi_master_get_devdata(master);
rspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
rspi->master = master;
rspi->ctlr = ctlr;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
rspi->addr = devm_ioremap_resource(&pdev->dev, res);
@ -1266,15 +1266,15 @@ static int rspi_probe(struct platform_device *pdev)
init_waitqueue_head(&rspi->wait);
master->bus_num = pdev->id;
master->setup = rspi_setup;
master->auto_runtime_pm = true;
master->transfer_one = ops->transfer_one;
master->prepare_message = rspi_prepare_message;
master->unprepare_message = rspi_unprepare_message;
master->mode_bits = ops->mode_bits;
master->flags = ops->flags;
master->dev.of_node = pdev->dev.of_node;
ctlr->bus_num = pdev->id;
ctlr->setup = rspi_setup;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = ops->transfer_one;
ctlr->prepare_message = rspi_prepare_message;
ctlr->unprepare_message = rspi_unprepare_message;
ctlr->mode_bits = ops->mode_bits;
ctlr->flags = ops->flags;
ctlr->dev.of_node = pdev->dev.of_node;
ret = platform_get_irq_byname(pdev, "rx");
if (ret < 0) {
@ -1311,13 +1311,13 @@ static int rspi_probe(struct platform_device *pdev)
goto error2;
}
ret = rspi_request_dma(&pdev->dev, master, res);
ret = rspi_request_dma(&pdev->dev, ctlr, res);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
ret = devm_spi_register_master(&pdev->dev, master);
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error3;
}
@ -1326,11 +1326,11 @@ static int rspi_probe(struct platform_device *pdev)
return 0;
error3:
rspi_release_dma(master);
rspi_release_dma(ctlr);
error2:
pm_runtime_disable(&pdev->dev);
error1:
spi_master_put(master);
spi_controller_put(ctlr);
return ret;
}
@ -1349,14 +1349,14 @@ static int rspi_suspend(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
return spi_master_suspend(rspi->master);
return spi_controller_suspend(rspi->ctlr);
}
static int rspi_resume(struct device *dev)
{
struct rspi_data *rspi = dev_get_drvdata(dev);
return spi_master_resume(rspi->master);
return spi_controller_resume(rspi->ctlr);
}
static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
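The rspi conversion above, like the hspi and msiof conversions that follow, is the same mechanical rename from the spi_master naming to the controller-neutral spi_controller naming. A minimal probe sketch, assuming a hypothetical driver with private state struct foo_priv, pairing each replacement helper with the name it supersedes:

static int foo_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;		/* was: struct spi_master *master */
	struct foo_priv *priv;			/* hypothetical driver state */
	int ret;

	ctlr = spi_alloc_master(&pdev->dev, sizeof(*priv));
	if (!ctlr)
		return -ENOMEM;

	priv = spi_controller_get_devdata(ctlr);	/* was spi_master_get_devdata() */
	platform_set_drvdata(pdev, priv);
	ctlr->flags = SPI_CONTROLLER_MUST_TX;		/* was SPI_MASTER_MUST_TX */

	ret = devm_spi_register_controller(&pdev->dev, ctlr); /* was devm_spi_register_master() */
	if (ret)
		spi_controller_put(ctlr);		/* was spi_master_put() */
	return ret;
}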

View File

@ -35,7 +35,7 @@
struct hspi_priv {
void __iomem *addr;
struct spi_master *master;
struct spi_controller *ctlr;
struct device *dev;
struct clk *clk;
};
@ -140,10 +140,10 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
}
static int hspi_transfer_one_message(struct spi_master *master,
static int hspi_transfer_one_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct hspi_priv *hspi = spi_master_get_devdata(master);
struct hspi_priv *hspi = spi_controller_get_devdata(ctlr);
struct spi_transfer *t;
u32 tx;
u32 rx;
@ -205,7 +205,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
ndelay(nsecs);
hspi_hw_cs_disable(hspi);
}
spi_finalize_current_message(master);
spi_finalize_current_message(ctlr);
return ret;
}
@ -213,7 +213,7 @@ static int hspi_transfer_one_message(struct spi_master *master,
static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
struct spi_controller *ctlr;
struct hspi_priv *hspi;
struct clk *clk;
int ret;
@ -225,11 +225,9 @@ static int hspi_probe(struct platform_device *pdev)
return -EINVAL;
}
master = spi_alloc_master(&pdev->dev, sizeof(*hspi));
if (!master) {
dev_err(&pdev->dev, "spi_alloc_master error.\n");
ctlr = spi_alloc_master(&pdev->dev, sizeof(*hspi));
if (!ctlr)
return -ENOMEM;
}
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk)) {
@ -238,33 +236,32 @@ static int hspi_probe(struct platform_device *pdev)
goto error0;
}
hspi = spi_master_get_devdata(master);
hspi = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, hspi);
/* init hspi */
hspi->master = master;
hspi->ctlr = ctlr;
hspi->dev = &pdev->dev;
hspi->clk = clk;
hspi->addr = devm_ioremap(hspi->dev,
res->start, resource_size(res));
if (!hspi->addr) {
dev_err(&pdev->dev, "ioremap error.\n");
ret = -ENOMEM;
goto error1;
}
pm_runtime_enable(&pdev->dev);
master->bus_num = pdev->id;
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->transfer_one_message = hspi_transfer_one_message;
master->bits_per_word_mask = SPI_BPW_MASK(8);
ctlr->bus_num = pdev->id;
ctlr->mode_bits = SPI_CPOL | SPI_CPHA;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one_message = hspi_transfer_one_message;
ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
ret = devm_spi_register_master(&pdev->dev, master);
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto error2;
}
@ -275,7 +272,7 @@ static int hspi_probe(struct platform_device *pdev)
error1:
clk_put(clk);
error0:
spi_master_put(master);
spi_controller_put(ctlr);
return ret;
}

View File

@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
* SuperH MSIOF SPI Master Interface
* SuperH MSIOF SPI Controller Interface
*
* Copyright (c) 2009 Magnus Damm
* Copyright (C) 2014 Renesas Electronics Corporation
@ -32,14 +32,15 @@
#include <asm/unaligned.h>
struct sh_msiof_chipdata {
u32 bits_per_word_mask;
u16 tx_fifo_size;
u16 rx_fifo_size;
u16 master_flags;
u16 ctlr_flags;
u16 min_div_pow;
};
struct sh_msiof_spi_priv {
struct spi_master *master;
struct spi_controller *ctlr;
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
@ -287,7 +288,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps);
sh_msiof_write(p, TSCR, scr);
if (!(p->master->flags & SPI_MASTER_MUST_TX))
if (!(p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, RSCR, scr);
}
@ -351,14 +352,14 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss,
tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
tmp |= lsb_first << MDR1_BITLSB_SHIFT;
tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
if (spi_controller_is_slave(p->master)) {
if (spi_controller_is_slave(p->ctlr)) {
sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON);
} else {
sh_msiof_write(p, TMDR1,
tmp | MDR1_TRMD | TMDR1_PCON |
(ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT);
}
if (p->master->flags & SPI_MASTER_MUST_TX) {
if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) {
/* These bits are reserved if RX needs TX */
tmp &= ~0x0000ffff;
}
@ -382,7 +383,7 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
{
u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
if (tx_buf || (p->master->flags & SPI_MASTER_MUST_TX))
if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
else
sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
@ -539,8 +540,9 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
static int sh_msiof_spi_setup(struct spi_device *spi)
{
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
struct device_node *np = spi->controller->dev.of_node;
struct sh_msiof_spi_priv *p =
spi_controller_get_devdata(spi->controller);
u32 clr, set, tmp;
if (!np) {
@ -556,7 +558,7 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
}
if (spi_controller_is_slave(p->master))
if (spi_controller_is_slave(p->ctlr))
return 0;
if (p->native_cs_inited &&
@ -581,10 +583,10 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
return 0;
}
static int sh_msiof_prepare_message(struct spi_master *master,
static int sh_msiof_prepare_message(struct spi_controller *ctlr,
struct spi_message *msg)
{
struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
const struct spi_device *spi = msg->spi;
u32 ss, cs_high;
@ -605,7 +607,7 @@ static int sh_msiof_prepare_message(struct spi_master *master,
static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
{
bool slave = spi_controller_is_slave(p->master);
bool slave = spi_controller_is_slave(p->ctlr);
int ret = 0;
/* setup clock and rx/tx signals */
@ -625,7 +627,7 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
{
bool slave = spi_controller_is_slave(p->master);
bool slave = spi_controller_is_slave(p->ctlr);
int ret = 0;
/* shut down frame, rx/tx and clock signals */
@ -641,9 +643,9 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
return ret;
}
static int sh_msiof_slave_abort(struct spi_master *master)
static int sh_msiof_slave_abort(struct spi_controller *ctlr)
{
struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
p->slave_aborted = true;
complete(&p->done);
@ -654,7 +656,7 @@ static int sh_msiof_slave_abort(struct spi_master *master)
static int sh_msiof_wait_for_completion(struct sh_msiof_spi_priv *p,
struct completion *x)
{
if (spi_controller_is_slave(p->master)) {
if (spi_controller_is_slave(p->ctlr)) {
if (wait_for_completion_interruptible(x) ||
p->slave_aborted) {
dev_dbg(&p->pdev->dev, "interrupted\n");
@ -754,7 +756,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* First prepare and submit the DMA request(s), as this may fail */
if (rx) {
ier_bits |= IER_RDREQE | IER_RDMAE;
desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx,
p->rx_dma_addr, len, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
@ -769,9 +771,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
if (tx) {
ier_bits |= IER_TDREQE | IER_TDMAE;
dma_sync_single_for_device(p->master->dma_tx->device->dev,
dma_sync_single_for_device(p->ctlr->dma_tx->device->dev,
p->tx_dma_addr, len, DMA_TO_DEVICE);
desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx,
p->tx_dma_addr, len, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx) {
@ -803,9 +805,9 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
/* Now start DMA */
if (rx)
dma_async_issue_pending(p->master->dma_rx);
dma_async_issue_pending(p->ctlr->dma_rx);
if (tx)
dma_async_issue_pending(p->master->dma_tx);
dma_async_issue_pending(p->ctlr->dma_tx);
ret = sh_msiof_spi_start(p, rx);
if (ret) {
@ -845,9 +847,8 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
}
if (rx)
dma_sync_single_for_cpu(p->master->dma_rx->device->dev,
p->rx_dma_addr, len,
DMA_FROM_DEVICE);
dma_sync_single_for_cpu(p->ctlr->dma_rx->device->dev,
p->rx_dma_addr, len, DMA_FROM_DEVICE);
return 0;
@ -856,10 +857,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
sh_msiof_spi_stop(p, rx);
stop_dma:
if (tx)
dmaengine_terminate_all(p->master->dma_tx);
dmaengine_terminate_all(p->ctlr->dma_tx);
no_dma_tx:
if (rx)
dmaengine_terminate_all(p->master->dma_rx);
dmaengine_terminate_all(p->ctlr->dma_rx);
sh_msiof_write(p, IER, 0);
return ret;
}
@ -907,11 +908,11 @@ static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
memcpy(dst, src, words * 4);
}
static int sh_msiof_transfer_one(struct spi_master *master,
static int sh_msiof_transfer_one(struct spi_controller *ctlr,
struct spi_device *spi,
struct spi_transfer *t)
{
struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
struct sh_msiof_spi_priv *p = spi_controller_get_devdata(ctlr);
void (*copy32)(u32 *, const u32 *, unsigned int);
void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
@ -926,10 +927,10 @@ static int sh_msiof_transfer_one(struct spi_master *master,
int ret;
/* setup clocks (clock already enabled in chipselect()) */
if (!spi_controller_is_slave(p->master))
if (!spi_controller_is_slave(p->ctlr))
sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
while (master->dma_tx && len > 15) {
while (ctlr->dma_tx && len > 15) {
/*
* DMA supports 32-bit words only, hence pack 8-bit and 16-bit
* words, with byte resp. word swapping.
@ -937,17 +938,13 @@ static int sh_msiof_transfer_one(struct spi_master *master,
unsigned int l = 0;
if (tx_buf)
l = min(len, p->tx_fifo_size * 4);
l = min(round_down(len, 4), p->tx_fifo_size * 4);
if (rx_buf)
l = min(len, p->rx_fifo_size * 4);
l = min(round_down(len, 4), p->rx_fifo_size * 4);
if (bits <= 8) {
if (l & 3)
break;
copy32 = copy_bswap32;
} else if (bits <= 16) {
if (l & 3)
break;
copy32 = copy_wswap32;
} else {
copy32 = copy_plain32;
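The hunk above replaces the old "bail out to PIO when the chunk is not a multiple of 4" checks with round_down(), so a partially unaligned transfer can still use DMA for its aligned head. A worked illustration with hypothetical numbers (30 bytes, 8-bit words, the 64-word FIFOs from the chipdata below):

	l = min(round_down(30, 4), 64 * 4);	/* = 28 bytes moved by DMA          */
						/* old code: 30 & 3 != 0, so the    */
						/* whole transfer dropped to PIO    */
	/* the remaining 2 bytes are left for the PIO path after the DMA loop exits */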
@ -1052,23 +1049,28 @@ static int sh_msiof_transfer_one(struct spi_master *master,
}
static const struct sh_msiof_chipdata sh_data = {
.bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
.master_flags = 0,
.ctlr_flags = 0,
.min_div_pow = 0,
};
static const struct sh_msiof_chipdata rcar_gen2_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
.master_flags = SPI_MASTER_MUST_TX,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 0,
};
static const struct sh_msiof_chipdata rcar_gen3_data = {
.bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16) |
SPI_BPW_MASK(24) | SPI_BPW_MASK(32),
.tx_fifo_size = 64,
.rx_fifo_size = 64,
.master_flags = SPI_MASTER_MUST_TX,
.ctlr_flags = SPI_CONTROLLER_MUST_TX,
.min_div_pow = 1,
};
@ -1136,7 +1138,7 @@ static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p)
if (ret <= 0)
return 0;
num_cs = max_t(unsigned int, ret, p->master->num_chipselect);
num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect);
for (i = 0; i < num_cs; i++) {
struct gpio_desc *gpiod;
@ -1206,10 +1208,10 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
{
struct platform_device *pdev = p->pdev;
struct device *dev = &pdev->dev;
const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
const struct sh_msiof_spi_info *info = p->info;
unsigned int dma_tx_id, dma_rx_id;
const struct resource *res;
struct spi_master *master;
struct spi_controller *ctlr;
struct device *tx_dev, *rx_dev;
if (dev->of_node) {
@ -1229,17 +1231,15 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
if (!res)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
master = p->master;
master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
dma_tx_id,
res->start + TFDR);
if (!master->dma_tx)
ctlr = p->ctlr;
ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
dma_tx_id, res->start + TFDR);
if (!ctlr->dma_tx)
return -ENODEV;
master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
dma_rx_id,
res->start + RFDR);
if (!master->dma_rx)
ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
dma_rx_id, res->start + RFDR);
if (!ctlr->dma_rx)
goto free_tx_chan;
p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
@ -1250,13 +1250,13 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
if (!p->rx_dma_page)
goto free_tx_page;
tx_dev = master->dma_tx->device->dev;
tx_dev = ctlr->dma_tx->device->dev;
p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
DMA_TO_DEVICE);
if (dma_mapping_error(tx_dev, p->tx_dma_addr))
goto free_rx_page;
rx_dev = master->dma_rx->device->dev;
rx_dev = ctlr->dma_rx->device->dev;
p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(rx_dev, p->rx_dma_addr))
@ -1272,34 +1272,34 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
free_tx_page:
free_page((unsigned long)p->tx_dma_page);
free_rx_chan:
dma_release_channel(master->dma_rx);
dma_release_channel(ctlr->dma_rx);
free_tx_chan:
dma_release_channel(master->dma_tx);
master->dma_tx = NULL;
dma_release_channel(ctlr->dma_tx);
ctlr->dma_tx = NULL;
return -ENODEV;
}
static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
{
struct spi_master *master = p->master;
struct spi_controller *ctlr = p->ctlr;
if (!master->dma_tx)
if (!ctlr->dma_tx)
return;
dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
PAGE_SIZE, DMA_FROM_DEVICE);
dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
PAGE_SIZE, DMA_TO_DEVICE);
dma_unmap_single(ctlr->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE,
DMA_FROM_DEVICE);
dma_unmap_single(ctlr->dma_tx->device->dev, p->tx_dma_addr, PAGE_SIZE,
DMA_TO_DEVICE);
free_page((unsigned long)p->rx_dma_page);
free_page((unsigned long)p->tx_dma_page);
dma_release_channel(master->dma_rx);
dma_release_channel(master->dma_tx);
dma_release_channel(ctlr->dma_rx);
dma_release_channel(ctlr->dma_tx);
}
static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct spi_master *master;
struct spi_controller *ctlr;
const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct sh_msiof_spi_priv *p;
@ -1320,18 +1320,18 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
}
if (info->mode == MSIOF_SPI_SLAVE)
master = spi_alloc_slave(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
ctlr = spi_alloc_slave(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
else
master = spi_alloc_master(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
if (master == NULL)
ctlr = spi_alloc_master(&pdev->dev,
sizeof(struct sh_msiof_spi_priv));
if (ctlr == NULL)
return -ENOMEM;
p = spi_master_get_devdata(master);
p = spi_controller_get_devdata(ctlr);
platform_set_drvdata(pdev, p);
p->master = master;
p->ctlr = ctlr;
p->info = info;
p->min_div_pow = chipdata->min_div_pow;
@ -1378,31 +1378,31 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p->rx_fifo_size = p->info->rx_fifo_override;
/* Setup GPIO chip selects */
master->num_chipselect = p->info->num_chipselect;
ctlr->num_chipselect = p->info->num_chipselect;
ret = sh_msiof_get_cs_gpios(p);
if (ret)
goto err1;
/* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
master->flags = chipdata->master_flags;
master->bus_num = pdev->id;
master->dev.of_node = pdev->dev.of_node;
master->setup = sh_msiof_spi_setup;
master->prepare_message = sh_msiof_prepare_message;
master->slave_abort = sh_msiof_slave_abort;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
master->auto_runtime_pm = true;
master->transfer_one = sh_msiof_transfer_one;
/* init controller code */
ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
ctlr->flags = chipdata->ctlr_flags;
ctlr->bus_num = pdev->id;
ctlr->dev.of_node = pdev->dev.of_node;
ctlr->setup = sh_msiof_spi_setup;
ctlr->prepare_message = sh_msiof_prepare_message;
ctlr->slave_abort = sh_msiof_slave_abort;
ctlr->bits_per_word_mask = chipdata->bits_per_word_mask;
ctlr->auto_runtime_pm = true;
ctlr->transfer_one = sh_msiof_transfer_one;
ret = sh_msiof_request_dma(p);
if (ret < 0)
dev_warn(&pdev->dev, "DMA not available, using PIO\n");
ret = devm_spi_register_master(&pdev->dev, master);
ret = devm_spi_register_controller(&pdev->dev, ctlr);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
dev_err(&pdev->dev, "devm_spi_register_controller error.\n");
goto err2;
}
@ -1412,7 +1412,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
sh_msiof_release_dma(p);
pm_runtime_disable(&pdev->dev);
err1:
spi_master_put(master);
spi_controller_put(ctlr);
return ret;
}
@ -1436,14 +1436,14 @@ static int sh_msiof_spi_suspend(struct device *dev)
{
struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
return spi_master_suspend(p->master);
return spi_controller_suspend(p->ctlr);
}
static int sh_msiof_spi_resume(struct device *dev)
{
struct sh_msiof_spi_priv *p = dev_get_drvdata(dev);
return spi_master_resume(p->master);
return spi_controller_resume(p->ctlr);
}
static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
@ -1465,7 +1465,7 @@ static struct platform_driver sh_msiof_spi_drv = {
};
module_platform_driver(sh_msiof_spi_drv);
MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
MODULE_DESCRIPTION("SuperH MSIOF SPI Controller Interface Driver");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:spi_sh_msiof");

drivers/spi/spi-sifive.c (new file, 448 lines added)
View File

@ -0,0 +1,448 @@
// SPDX-License-Identifier: GPL-2.0
//
// Copyright 2018 SiFive, Inc.
//
// SiFive SPI controller driver (master mode only)
//
// Author: SiFive, Inc.
// sifive@sifive.com
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/io.h>
#include <linux/log2.h>
#define SIFIVE_SPI_DRIVER_NAME "sifive_spi"
#define SIFIVE_SPI_MAX_CS 32
#define SIFIVE_SPI_DEFAULT_DEPTH 8
#define SIFIVE_SPI_DEFAULT_MAX_BITS 8
/* register offsets */
#define SIFIVE_SPI_REG_SCKDIV 0x00 /* Serial clock divisor */
#define SIFIVE_SPI_REG_SCKMODE 0x04 /* Serial clock mode */
#define SIFIVE_SPI_REG_CSID 0x10 /* Chip select ID */
#define SIFIVE_SPI_REG_CSDEF 0x14 /* Chip select default */
#define SIFIVE_SPI_REG_CSMODE 0x18 /* Chip select mode */
#define SIFIVE_SPI_REG_DELAY0 0x28 /* Delay control 0 */
#define SIFIVE_SPI_REG_DELAY1 0x2c /* Delay control 1 */
#define SIFIVE_SPI_REG_FMT 0x40 /* Frame format */
#define SIFIVE_SPI_REG_TXDATA 0x48 /* Tx FIFO data */
#define SIFIVE_SPI_REG_RXDATA 0x4c /* Rx FIFO data */
#define SIFIVE_SPI_REG_TXMARK 0x50 /* Tx FIFO watermark */
#define SIFIVE_SPI_REG_RXMARK 0x54 /* Rx FIFO watermark */
#define SIFIVE_SPI_REG_FCTRL 0x60 /* SPI flash interface control */
#define SIFIVE_SPI_REG_FFMT 0x64 /* SPI flash instruction format */
#define SIFIVE_SPI_REG_IE 0x70 /* Interrupt Enable Register */
#define SIFIVE_SPI_REG_IP 0x74 /* Interrupt Pendings Register */
/* sckdiv bits */
#define SIFIVE_SPI_SCKDIV_DIV_MASK 0xfffU
/* sckmode bits */
#define SIFIVE_SPI_SCKMODE_PHA BIT(0)
#define SIFIVE_SPI_SCKMODE_POL BIT(1)
#define SIFIVE_SPI_SCKMODE_MODE_MASK (SIFIVE_SPI_SCKMODE_PHA | \
SIFIVE_SPI_SCKMODE_POL)
/* csmode bits */
#define SIFIVE_SPI_CSMODE_MODE_AUTO 0U
#define SIFIVE_SPI_CSMODE_MODE_HOLD 2U
#define SIFIVE_SPI_CSMODE_MODE_OFF 3U
/* delay0 bits */
#define SIFIVE_SPI_DELAY0_CSSCK(x) ((u32)(x))
#define SIFIVE_SPI_DELAY0_CSSCK_MASK 0xffU
#define SIFIVE_SPI_DELAY0_SCKCS(x) ((u32)(x) << 16)
#define SIFIVE_SPI_DELAY0_SCKCS_MASK (0xffU << 16)
/* delay1 bits */
#define SIFIVE_SPI_DELAY1_INTERCS(x) ((u32)(x))
#define SIFIVE_SPI_DELAY1_INTERCS_MASK 0xffU
#define SIFIVE_SPI_DELAY1_INTERXFR(x) ((u32)(x) << 16)
#define SIFIVE_SPI_DELAY1_INTERXFR_MASK (0xffU << 16)
/* fmt bits */
#define SIFIVE_SPI_FMT_PROTO_SINGLE 0U
#define SIFIVE_SPI_FMT_PROTO_DUAL 1U
#define SIFIVE_SPI_FMT_PROTO_QUAD 2U
#define SIFIVE_SPI_FMT_PROTO_MASK 3U
#define SIFIVE_SPI_FMT_ENDIAN BIT(2)
#define SIFIVE_SPI_FMT_DIR BIT(3)
#define SIFIVE_SPI_FMT_LEN(x) ((u32)(x) << 16)
#define SIFIVE_SPI_FMT_LEN_MASK (0xfU << 16)
/* txdata bits */
#define SIFIVE_SPI_TXDATA_DATA_MASK 0xffU
#define SIFIVE_SPI_TXDATA_FULL BIT(31)
/* rxdata bits */
#define SIFIVE_SPI_RXDATA_DATA_MASK 0xffU
#define SIFIVE_SPI_RXDATA_EMPTY BIT(31)
/* ie and ip bits */
#define SIFIVE_SPI_IP_TXWM BIT(0)
#define SIFIVE_SPI_IP_RXWM BIT(1)
struct sifive_spi {
void __iomem *regs; /* virt. address of control registers */
struct clk *clk; /* bus clock */
unsigned int fifo_depth; /* fifo depth in words */
u32 cs_inactive; /* level of the CS pins when inactive */
struct completion done; /* wake-up from interrupt */
};
static void sifive_spi_write(struct sifive_spi *spi, int offset, u32 value)
{
iowrite32(value, spi->regs + offset);
}
static u32 sifive_spi_read(struct sifive_spi *spi, int offset)
{
return ioread32(spi->regs + offset);
}
static void sifive_spi_init(struct sifive_spi *spi)
{
/* Watermark interrupts are disabled by default */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
/* Default watermark FIFO threshold values */
sifive_spi_write(spi, SIFIVE_SPI_REG_TXMARK, 1);
sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK, 0);
/* Set CS/SCK Delays and Inactive Time to defaults */
sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY0,
SIFIVE_SPI_DELAY0_CSSCK(1) |
SIFIVE_SPI_DELAY0_SCKCS(1));
sifive_spi_write(spi, SIFIVE_SPI_REG_DELAY1,
SIFIVE_SPI_DELAY1_INTERCS(1) |
SIFIVE_SPI_DELAY1_INTERXFR(0));
/* Exit specialized memory-mapped SPI flash mode */
sifive_spi_write(spi, SIFIVE_SPI_REG_FCTRL, 0);
}
static int
sifive_spi_prepare_message(struct spi_master *master, struct spi_message *msg)
{
struct sifive_spi *spi = spi_master_get_devdata(master);
struct spi_device *device = msg->spi;
/* Update the chip select polarity */
if (device->mode & SPI_CS_HIGH)
spi->cs_inactive &= ~BIT(device->chip_select);
else
spi->cs_inactive |= BIT(device->chip_select);
sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
/* Select the correct device */
sifive_spi_write(spi, SIFIVE_SPI_REG_CSID, device->chip_select);
/* Set clock mode */
sifive_spi_write(spi, SIFIVE_SPI_REG_SCKMODE,
device->mode & SIFIVE_SPI_SCKMODE_MODE_MASK);
return 0;
}
static void sifive_spi_set_cs(struct spi_device *device, bool is_high)
{
struct sifive_spi *spi = spi_master_get_devdata(device->master);
/* Reverse polarity is handled by SCMR/CPOL. Not inverted CS. */
if (device->mode & SPI_CS_HIGH)
is_high = !is_high;
sifive_spi_write(spi, SIFIVE_SPI_REG_CSMODE, is_high ?
SIFIVE_SPI_CSMODE_MODE_AUTO :
SIFIVE_SPI_CSMODE_MODE_HOLD);
}
static int
sifive_spi_prep_transfer(struct sifive_spi *spi, struct spi_device *device,
struct spi_transfer *t)
{
u32 cr;
unsigned int mode;
/* Calculate and program the clock rate */
cr = DIV_ROUND_UP(clk_get_rate(spi->clk) >> 1, t->speed_hz) - 1;
cr &= SIFIVE_SPI_SCKDIV_DIV_MASK;
sifive_spi_write(spi, SIFIVE_SPI_REG_SCKDIV, cr);
mode = max_t(unsigned int, t->rx_nbits, t->tx_nbits);
/* Set frame format */
cr = SIFIVE_SPI_FMT_LEN(t->bits_per_word);
switch (mode) {
case SPI_NBITS_QUAD:
cr |= SIFIVE_SPI_FMT_PROTO_QUAD;
break;
case SPI_NBITS_DUAL:
cr |= SIFIVE_SPI_FMT_PROTO_DUAL;
break;
default:
cr |= SIFIVE_SPI_FMT_PROTO_SINGLE;
break;
}
if (device->mode & SPI_LSB_FIRST)
cr |= SIFIVE_SPI_FMT_ENDIAN;
if (!t->rx_buf)
cr |= SIFIVE_SPI_FMT_DIR;
sifive_spi_write(spi, SIFIVE_SPI_REG_FMT, cr);
/* We will want to poll if the time we need to wait is
* less than the context switching time.
* Let's call that threshold 5us. The operation will take:
* (8/mode) * fifo_depth / hz <= 5 * 10^-6
* 1600000 * fifo_depth <= hz * mode
*/
return 1600000 * spi->fifo_depth <= t->speed_hz * mode;
}
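/*
 * Worked example of the threshold above (illustrative numbers, not taken
 * from the driver): with the default 8-entry FIFO and a single-bit
 * transfer (mode = 1) the condition 1600000 * 8 <= speed_hz * 1 selects
 * polling once speed_hz reaches 12.8 MHz, i.e. exactly when draining the
 * FIFO takes 8 words * 8 bits / 12.8 MHz = 5 us. In quad mode (mode = 4)
 * the FIFO drains four times faster, so the cut-over drops to 3.2 MHz.
 */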
static irqreturn_t sifive_spi_irq(int irq, void *dev_id)
{
struct sifive_spi *spi = dev_id;
u32 ip = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
if (ip & (SIFIVE_SPI_IP_TXWM | SIFIVE_SPI_IP_RXWM)) {
/* Disable interrupts until next transfer */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
complete(&spi->done);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static void sifive_spi_wait(struct sifive_spi *spi, u32 bit, int poll)
{
if (poll) {
u32 cr;
do {
cr = sifive_spi_read(spi, SIFIVE_SPI_REG_IP);
} while (!(cr & bit));
} else {
reinit_completion(&spi->done);
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, bit);
wait_for_completion(&spi->done);
}
}
static void sifive_spi_tx(struct sifive_spi *spi, const u8 *tx_ptr)
{
WARN_ON_ONCE((sifive_spi_read(spi, SIFIVE_SPI_REG_TXDATA)
& SIFIVE_SPI_TXDATA_FULL) != 0);
sifive_spi_write(spi, SIFIVE_SPI_REG_TXDATA,
*tx_ptr & SIFIVE_SPI_TXDATA_DATA_MASK);
}
static void sifive_spi_rx(struct sifive_spi *spi, u8 *rx_ptr)
{
u32 data = sifive_spi_read(spi, SIFIVE_SPI_REG_RXDATA);
WARN_ON_ONCE((data & SIFIVE_SPI_RXDATA_EMPTY) != 0);
*rx_ptr = data & SIFIVE_SPI_RXDATA_DATA_MASK;
}
static int
sifive_spi_transfer_one(struct spi_master *master, struct spi_device *device,
struct spi_transfer *t)
{
struct sifive_spi *spi = spi_master_get_devdata(master);
int poll = sifive_spi_prep_transfer(spi, device, t);
const u8 *tx_ptr = t->tx_buf;
u8 *rx_ptr = t->rx_buf;
unsigned int remaining_words = t->len;
while (remaining_words) {
unsigned int n_words = min(remaining_words, spi->fifo_depth);
unsigned int i;
/* Enqueue n_words for transmission */
for (i = 0; i < n_words; i++)
sifive_spi_tx(spi, tx_ptr++);
if (rx_ptr) {
/* Wait for transmission + reception to complete */
sifive_spi_write(spi, SIFIVE_SPI_REG_RXMARK,
n_words - 1);
sifive_spi_wait(spi, SIFIVE_SPI_IP_RXWM, poll);
/* Read out all the data from the RX FIFO */
for (i = 0; i < n_words; i++)
sifive_spi_rx(spi, rx_ptr++);
} else {
/* Wait for transmission to complete */
sifive_spi_wait(spi, SIFIVE_SPI_IP_TXWM, poll);
}
remaining_words -= n_words;
}
return 0;
}
static int sifive_spi_probe(struct platform_device *pdev)
{
struct sifive_spi *spi;
struct resource *res;
int ret, irq, num_cs;
u32 cs_bits, max_bits_per_word;
struct spi_master *master;
master = spi_alloc_master(&pdev->dev, sizeof(struct sifive_spi));
if (!master) {
dev_err(&pdev->dev, "out of memory\n");
return -ENOMEM;
}
spi = spi_master_get_devdata(master);
init_completion(&spi->done);
platform_set_drvdata(pdev, master);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(spi->regs)) {
ret = PTR_ERR(spi->regs);
goto put_master;
}
spi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(spi->clk)) {
dev_err(&pdev->dev, "Unable to find bus clock\n");
ret = PTR_ERR(spi->clk);
goto put_master;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "Unable to find interrupt\n");
ret = irq;
goto put_master;
}
/* Optional parameters */
ret =
of_property_read_u32(pdev->dev.of_node, "sifive,fifo-depth",
&spi->fifo_depth);
if (ret < 0)
spi->fifo_depth = SIFIVE_SPI_DEFAULT_DEPTH;
ret =
of_property_read_u32(pdev->dev.of_node, "sifive,max-bits-per-word",
&max_bits_per_word);
if (!ret && max_bits_per_word < 8) {
dev_err(&pdev->dev, "Only 8bit SPI words supported by the driver\n");
ret = -EINVAL;
goto put_master;
}
/* Spin up the bus clock before hitting registers */
ret = clk_prepare_enable(spi->clk);
if (ret) {
dev_err(&pdev->dev, "Unable to enable bus clock\n");
goto put_master;
}
/* probe the number of CS lines */
spi->cs_inactive = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, 0xffffffffU);
cs_bits = sifive_spi_read(spi, SIFIVE_SPI_REG_CSDEF);
sifive_spi_write(spi, SIFIVE_SPI_REG_CSDEF, spi->cs_inactive);
if (!cs_bits) {
dev_err(&pdev->dev, "Could not auto probe CS lines\n");
ret = -EINVAL;
goto put_master;
}
num_cs = ilog2(cs_bits) + 1;
if (num_cs > SIFIVE_SPI_MAX_CS) {
dev_err(&pdev->dev, "Invalid number of spi slaves\n");
ret = -EINVAL;
goto put_master;
}
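/*
 * Illustration (hypothetical part, not from the driver): a controller
 * wired with four chip-select lines reads back cs_bits == 0xf after CSDEF
 * is written with all ones, so num_cs = ilog2(0xf) + 1 = 3 + 1 = 4.
 */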
/* Define our master */
master->dev.of_node = pdev->dev.of_node;
master->bus_num = pdev->id;
master->num_chipselect = num_cs;
master->mode_bits = SPI_CPHA | SPI_CPOL
| SPI_CS_HIGH | SPI_LSB_FIRST
| SPI_TX_DUAL | SPI_TX_QUAD
| SPI_RX_DUAL | SPI_RX_QUAD;
/* TODO: add driver support for bits_per_word < 8
* we need to "left-align" the bits (unless SPI_LSB_FIRST)
*/
master->bits_per_word_mask = SPI_BPW_MASK(8);
master->flags = SPI_CONTROLLER_MUST_TX | SPI_MASTER_GPIO_SS;
master->prepare_message = sifive_spi_prepare_message;
master->set_cs = sifive_spi_set_cs;
master->transfer_one = sifive_spi_transfer_one;
pdev->dev.dma_mask = NULL;
/* Configure the SPI master hardware */
sifive_spi_init(spi);
/* Register for SPI Interrupt */
ret = devm_request_irq(&pdev->dev, irq, sifive_spi_irq, 0,
dev_name(&pdev->dev), spi);
if (ret) {
dev_err(&pdev->dev, "Unable to bind to interrupt\n");
goto put_master;
}
dev_info(&pdev->dev, "mapped; irq=%d, cs=%d\n",
irq, master->num_chipselect);
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master failed\n");
goto put_master;
}
return 0;
put_master:
spi_master_put(master);
return ret;
}
static int sifive_spi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct sifive_spi *spi = spi_master_get_devdata(master);
/* Disable all the interrupts just in case */
sifive_spi_write(spi, SIFIVE_SPI_REG_IE, 0);
return 0;
}
static const struct of_device_id sifive_spi_of_match[] = {
{ .compatible = "sifive,spi0", },
{}
};
MODULE_DEVICE_TABLE(of, sifive_spi_of_match);
static struct platform_driver sifive_spi_driver = {
.probe = sifive_spi_probe,
.remove = sifive_spi_remove,
.driver = {
.name = SIFIVE_SPI_DRIVER_NAME,
.of_match_table = sifive_spi_of_match,
},
};
module_platform_driver(sifive_spi_driver);
MODULE_AUTHOR("SiFive, Inc. <sifive@sifive.com>");
MODULE_DESCRIPTION("SiFive SPI driver");
MODULE_LICENSE("GPL");

View File

@ -2,6 +2,9 @@
// Copyright (C) 2018 Spreadtrum Communications Inc.
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/sprd-dma.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
@ -9,6 +12,7 @@
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
@ -128,11 +132,28 @@
#define SPRD_SPI_DEFAULT_SOURCE 26000000
#define SPRD_SPI_MAX_SPEED_HZ 48000000
#define SPRD_SPI_AUTOSUSPEND_DELAY 100
#define SPRD_SPI_DMA_STEP 8
enum sprd_spi_dma_channel {
SPRD_SPI_RX,
SPRD_SPI_TX,
SPRD_SPI_MAX,
};
struct sprd_spi_dma {
bool enable;
struct dma_chan *dma_chan[SPRD_SPI_MAX];
enum dma_slave_buswidth width;
u32 fragmens_len;
u32 rx_len;
};
struct sprd_spi {
void __iomem *base;
phys_addr_t phy_base;
struct device *dev;
struct clk *clk;
int irq;
u32 src_clk;
u32 hw_mode;
u32 trans_len;
@ -141,6 +162,8 @@ struct sprd_spi {
u32 hw_speed_hz;
u32 len;
int status;
struct sprd_spi_dma dma;
struct completion xfer_completion;
const void *tx_buf;
void *rx_buf;
int (*read_bufs)(struct sprd_spi *ss, u32 len);
@ -431,6 +454,208 @@ static int sprd_spi_txrx_bufs(struct spi_device *sdev, struct spi_transfer *t)
return ret;
}
static void sprd_spi_irq_enable(struct sprd_spi *ss)
{
u32 val;
/* Clear interrupt status before enabling interrupt. */
writel_relaxed(SPRD_SPI_TX_END_CLR | SPRD_SPI_RX_END_CLR,
ss->base + SPRD_SPI_INT_CLR);
/* Enable SPI interrupt only in DMA mode. */
val = readl_relaxed(ss->base + SPRD_SPI_INT_EN);
writel_relaxed(val | SPRD_SPI_TX_END_INT_EN |
SPRD_SPI_RX_END_INT_EN,
ss->base + SPRD_SPI_INT_EN);
}
static void sprd_spi_irq_disable(struct sprd_spi *ss)
{
writel_relaxed(0, ss->base + SPRD_SPI_INT_EN);
}
static void sprd_spi_dma_enable(struct sprd_spi *ss, bool enable)
{
u32 val = readl_relaxed(ss->base + SPRD_SPI_CTL2);
if (enable)
val |= SPRD_SPI_DMA_EN;
else
val &= ~SPRD_SPI_DMA_EN;
writel_relaxed(val, ss->base + SPRD_SPI_CTL2);
}
static int sprd_spi_dma_submit(struct dma_chan *dma_chan,
struct dma_slave_config *c,
struct sg_table *sg,
enum dma_transfer_direction dir)
{
struct dma_async_tx_descriptor *desc;
dma_cookie_t cookie;
unsigned long flags;
int ret;
ret = dmaengine_slave_config(dma_chan, c);
if (ret < 0)
return ret;
flags = SPRD_DMA_FLAGS(SPRD_DMA_CHN_MODE_NONE, SPRD_DMA_NO_TRG,
SPRD_DMA_FRAG_REQ, SPRD_DMA_TRANS_INT);
desc = dmaengine_prep_slave_sg(dma_chan, sg->sgl, sg->nents, dir, flags);
if (!desc)
return -ENODEV;
cookie = dmaengine_submit(desc);
if (dma_submit_error(cookie))
return dma_submit_error(cookie);
dma_async_issue_pending(dma_chan);
return 0;
}
static int sprd_spi_dma_rx_config(struct sprd_spi *ss, struct spi_transfer *t)
{
struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_RX];
struct dma_slave_config config = {
.src_addr = ss->phy_base,
.src_addr_width = ss->dma.width,
.dst_addr_width = ss->dma.width,
.dst_maxburst = ss->dma.fragmens_len,
};
int ret;
ret = sprd_spi_dma_submit(dma_chan, &config, &t->rx_sg, DMA_DEV_TO_MEM);
if (ret)
return ret;
return ss->dma.rx_len;
}
static int sprd_spi_dma_tx_config(struct sprd_spi *ss, struct spi_transfer *t)
{
struct dma_chan *dma_chan = ss->dma.dma_chan[SPRD_SPI_TX];
struct dma_slave_config config = {
.dst_addr = ss->phy_base,
.src_addr_width = ss->dma.width,
.dst_addr_width = ss->dma.width,
.src_maxburst = ss->dma.fragmens_len,
};
int ret;
ret = sprd_spi_dma_submit(dma_chan, &config, &t->tx_sg, DMA_MEM_TO_DEV);
if (ret)
return ret;
return t->len;
}
static int sprd_spi_dma_request(struct sprd_spi *ss)
{
ss->dma.dma_chan[SPRD_SPI_RX] = dma_request_chan(ss->dev, "rx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_RX])) {
if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]) == -EPROBE_DEFER)
return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
dev_err(ss->dev, "request RX DMA channel failed!\n");
return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_RX]);
}
ss->dma.dma_chan[SPRD_SPI_TX] = dma_request_chan(ss->dev, "tx_chn");
if (IS_ERR_OR_NULL(ss->dma.dma_chan[SPRD_SPI_TX])) {
if (PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]) == -EPROBE_DEFER)
return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
dev_err(ss->dev, "request TX DMA channel failed!\n");
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
return PTR_ERR(ss->dma.dma_chan[SPRD_SPI_TX]);
}
return 0;
}
static void sprd_spi_dma_release(struct sprd_spi *ss)
{
if (ss->dma.dma_chan[SPRD_SPI_RX])
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_RX]);
if (ss->dma.dma_chan[SPRD_SPI_TX])
dma_release_channel(ss->dma.dma_chan[SPRD_SPI_TX]);
}
static int sprd_spi_dma_txrx_bufs(struct spi_device *sdev,
struct spi_transfer *t)
{
struct sprd_spi *ss = spi_master_get_devdata(sdev->master);
u32 trans_len = ss->trans_len;
int ret, write_size = 0;
reinit_completion(&ss->xfer_completion);
sprd_spi_irq_enable(ss);
if (ss->trans_mode & SPRD_SPI_TX_MODE) {
write_size = sprd_spi_dma_tx_config(ss, t);
sprd_spi_set_tx_length(ss, trans_len);
/*
* For our 3 wires mode or dual TX line mode, we need
* to request the controller to transfer.
*/
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_tx_req(ss);
} else {
sprd_spi_set_rx_length(ss, trans_len);
/*
* For our 3 wires mode or dual TX line mode, we need
* to request the controller to read.
*/
if (ss->hw_mode & SPI_3WIRE || ss->hw_mode & SPI_TX_DUAL)
sprd_spi_rx_req(ss);
else
write_size = ss->write_bufs(ss, trans_len);
}
if (write_size < 0) {
ret = write_size;
dev_err(ss->dev, "failed to write, ret = %d\n", ret);
goto trans_complete;
}
if (ss->trans_mode & SPRD_SPI_RX_MODE) {
/*
* Set up the DMA receive data length, which must be an
* integral multiple of fragment length. But when the length
* of received data is less than fragment length, DMA can be
* configured to receive data according to the actual length
* of received data.
*/
ss->dma.rx_len = t->len > ss->dma.fragmens_len ?
(t->len - t->len % ss->dma.fragmens_len) :
t->len;
ret = sprd_spi_dma_rx_config(ss, t);
if (ret < 0) {
dev_err(&sdev->dev,
"failed to configure rx DMA, ret = %d\n", ret);
goto trans_complete;
}
}
sprd_spi_dma_enable(ss, true);
wait_for_completion(&(ss->xfer_completion));
if (ss->trans_mode & SPRD_SPI_TX_MODE)
ret = write_size;
else
ret = ss->dma.rx_len;
trans_complete:
sprd_spi_dma_enable(ss, false);
sprd_spi_enter_idle(ss);
sprd_spi_irq_disable(ss);
return ret;
}
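/*
 * Worked example of the rx_len rounding above (hypothetical transfer, not
 * from the patch): with 8-bit words fragmens_len is SPRD_SPI_DMA_STEP = 8,
 * so a 21-byte read programs the DMA engine for 21 - (21 % 8) = 16 bytes;
 * the remaining 5 bytes are drained by read_bufs() in the RX-end interrupt
 * handler once it sees dma.rx_len < len.
 */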
static void sprd_spi_set_speed(struct sprd_spi *ss, u32 speed_hz)
{
/*
@ -516,16 +741,22 @@ static int sprd_spi_setup_transfer(struct spi_device *sdev,
ss->trans_len = t->len;
ss->read_bufs = sprd_spi_read_bufs_u8;
ss->write_bufs = sprd_spi_write_bufs_u8;
ss->dma.width = DMA_SLAVE_BUSWIDTH_1_BYTE;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP;
break;
case 16:
ss->trans_len = t->len >> 1;
ss->read_bufs = sprd_spi_read_bufs_u16;
ss->write_bufs = sprd_spi_write_bufs_u16;
ss->dma.width = DMA_SLAVE_BUSWIDTH_2_BYTES;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 1;
break;
case 32:
ss->trans_len = t->len >> 2;
ss->read_bufs = sprd_spi_read_bufs_u32;
ss->write_bufs = sprd_spi_write_bufs_u32;
ss->dma.width = DMA_SLAVE_BUSWIDTH_4_BYTES;
ss->dma.fragmens_len = SPRD_SPI_DMA_STEP << 2;
break;
default:
return -EINVAL;
@ -563,7 +794,11 @@ static int sprd_spi_transfer_one(struct spi_controller *sctlr,
if (ret)
goto setup_err;
ret = sprd_spi_txrx_bufs(sdev, t);
if (sctlr->can_dma(sctlr, sdev, t))
ret = sprd_spi_dma_txrx_bufs(sdev, t);
else
ret = sprd_spi_txrx_bufs(sdev, t);
if (ret == t->len)
ret = 0;
else if (ret >= 0)
@ -575,6 +810,53 @@ static int sprd_spi_transfer_one(struct spi_controller *sctlr,
return ret;
}
static irqreturn_t sprd_spi_handle_irq(int irq, void *data)
{
struct sprd_spi *ss = (struct sprd_spi *)data;
u32 val = readl_relaxed(ss->base + SPRD_SPI_INT_MASK_STS);
if (val & SPRD_SPI_MASK_TX_END) {
writel_relaxed(SPRD_SPI_TX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
if (!(ss->trans_mode & SPRD_SPI_RX_MODE))
complete(&ss->xfer_completion);
return IRQ_HANDLED;
}
if (val & SPRD_SPI_MASK_RX_END) {
writel_relaxed(SPRD_SPI_RX_END_CLR, ss->base + SPRD_SPI_INT_CLR);
if (ss->dma.rx_len < ss->len) {
ss->rx_buf += ss->dma.rx_len;
ss->dma.rx_len +=
ss->read_bufs(ss, ss->len - ss->dma.rx_len);
}
complete(&ss->xfer_completion);
return IRQ_HANDLED;
}
return IRQ_NONE;
}
static int sprd_spi_irq_init(struct platform_device *pdev, struct sprd_spi *ss)
{
int ret;
ss->irq = platform_get_irq(pdev, 0);
if (ss->irq < 0) {
dev_err(&pdev->dev, "failed to get irq resource\n");
return ss->irq;
}
ret = devm_request_irq(&pdev->dev, ss->irq, sprd_spi_handle_irq,
0, pdev->name, ss);
if (ret)
dev_err(&pdev->dev, "failed to request spi irq %d, ret = %d\n",
ss->irq, ret);
return ret;
}
static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
{
struct clk *clk_spi, *clk_parent;
@ -605,6 +887,35 @@ static int sprd_spi_clk_init(struct platform_device *pdev, struct sprd_spi *ss)
return 0;
}
static bool sprd_spi_can_dma(struct spi_controller *sctlr,
struct spi_device *spi, struct spi_transfer *t)
{
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
return ss->dma.enable && (t->len > SPRD_SPI_FIFO_SIZE);
}
static int sprd_spi_dma_init(struct platform_device *pdev, struct sprd_spi *ss)
{
int ret;
ret = sprd_spi_dma_request(ss);
if (ret) {
if (ret == -EPROBE_DEFER)
return ret;
dev_warn(&pdev->dev,
"failed to request dma, enter no dma mode, ret = %d\n",
ret);
return 0;
}
ss->dma.enable = true;
return 0;
}
static int sprd_spi_probe(struct platform_device *pdev)
{
struct spi_controller *sctlr;
@ -625,25 +936,36 @@ static int sprd_spi_probe(struct platform_device *pdev)
goto free_controller;
}
ss->phy_base = res->start;
ss->dev = &pdev->dev;
sctlr->dev.of_node = pdev->dev.of_node;
sctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_3WIRE | SPI_TX_DUAL;
sctlr->bus_num = pdev->id;
sctlr->set_cs = sprd_spi_chipselect;
sctlr->transfer_one = sprd_spi_transfer_one;
sctlr->can_dma = sprd_spi_can_dma;
sctlr->auto_runtime_pm = true;
sctlr->max_speed_hz = min_t(u32, ss->src_clk >> 1,
SPRD_SPI_MAX_SPEED_HZ);
init_completion(&ss->xfer_completion);
platform_set_drvdata(pdev, sctlr);
ret = sprd_spi_clk_init(pdev, ss);
if (ret)
goto free_controller;
ret = clk_prepare_enable(ss->clk);
ret = sprd_spi_irq_init(pdev, ss);
if (ret)
goto free_controller;
ret = sprd_spi_dma_init(pdev, ss);
if (ret)
goto free_controller;
ret = clk_prepare_enable(ss->clk);
if (ret)
goto release_dma;
ret = pm_runtime_set_active(&pdev->dev);
if (ret < 0)
goto disable_clk;
@ -672,6 +994,8 @@ static int sprd_spi_probe(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
disable_clk:
clk_disable_unprepare(ss->clk);
release_dma:
sprd_spi_dma_release(ss);
free_controller:
spi_controller_put(sctlr);
@ -690,6 +1014,10 @@ static int sprd_spi_remove(struct platform_device *pdev)
return ret;
}
spi_controller_suspend(sctlr);
if (ss->dma.enable)
sprd_spi_dma_release(ss);
clk_disable_unprepare(ss->clk);
pm_runtime_put_noidle(&pdev->dev);
pm_runtime_disable(&pdev->dev);
@ -702,6 +1030,9 @@ static int __maybe_unused sprd_spi_runtime_suspend(struct device *dev)
struct spi_controller *sctlr = dev_get_drvdata(dev);
struct sprd_spi *ss = spi_controller_get_devdata(sctlr);
if (ss->dma.enable)
sprd_spi_dma_release(ss);
clk_disable_unprepare(ss->clk);
return 0;
@ -717,7 +1048,14 @@ static int __maybe_unused sprd_spi_runtime_resume(struct device *dev)
if (ret)
return ret;
return 0;
if (!ss->dma.enable)
return 0;
ret = sprd_spi_dma_request(ss);
if (ret)
clk_disable_unprepare(ss->clk);
return ret;
}
static const struct dev_pm_ops sprd_spi_pm_ops = {

File diff suppressed because it is too large.

View File

@ -19,6 +19,7 @@
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>
#include <linux/of_gpio.h>
#include <linux/gpio/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/property.h>
@ -578,7 +579,10 @@ int spi_add_device(struct spi_device *spi)
goto done;
}
if (ctlr->cs_gpios)
/* Descriptors take precedence */
if (ctlr->cs_gpiods)
spi->cs_gpiod = ctlr->cs_gpiods[spi->chip_select];
else if (ctlr->cs_gpios)
spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
/* Drivers may modify this initial i/o setup, but will
@ -772,10 +776,21 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
if (spi->mode & SPI_CS_HIGH)
enable = !enable;
if (gpio_is_valid(spi->cs_gpio)) {
/* Honour the SPI_NO_CS flag */
if (!(spi->mode & SPI_NO_CS))
gpio_set_value(spi->cs_gpio, !enable);
if (spi->cs_gpiod || gpio_is_valid(spi->cs_gpio)) {
/*
* Honour the SPI_NO_CS flag and invert the enable line, as
* active low is default for SPI. Execution paths that handle
* polarity inversion in gpiolib (such as device tree) will
* enforce active high using the SPI_CS_HIGH resulting in a
* double inversion through the code above.
*/
if (!(spi->mode & SPI_NO_CS)) {
if (spi->cs_gpiod)
gpiod_set_value_cansleep(spi->cs_gpiod,
!enable);
else
gpio_set_value_cansleep(spi->cs_gpio, !enable);
}
/* Some SPI masters need both GPIO CS & slave_select */
if ((spi->controller->flags & SPI_MASTER_GPIO_SS) &&
spi->controller->set_cs)
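/*
 * Worked trace of the descriptor path above, assuming a chip select flagged
 * GPIO_ACTIVE_LOW in the device tree: gpiolib records the active-low
 * polarity, the core forces SPI_CS_HIGH for descriptor users (see the
 * of_spi_parse_dt hunk below), so asserting the device enters spi_set_cs()
 * with enable == true, SPI_CS_HIGH flips it to false, and
 * gpiod_set_value_cansleep(spi->cs_gpiod, !enable) is called with 1;
 * gpiolib's own inversion then drives the physical line low, i.e. asserted.
 * The two inversions cancel, which is the double inversion the comment
 * above refers to.
 */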
@ -1615,13 +1630,21 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi->mode |= SPI_CPHA;
if (of_property_read_bool(nc, "spi-cpol"))
spi->mode |= SPI_CPOL;
if (of_property_read_bool(nc, "spi-cs-high"))
spi->mode |= SPI_CS_HIGH;
if (of_property_read_bool(nc, "spi-3wire"))
spi->mode |= SPI_3WIRE;
if (of_property_read_bool(nc, "spi-lsb-first"))
spi->mode |= SPI_LSB_FIRST;
/*
* For descriptors associated with the device, polarity inversion is
* handled in the gpiolib, so all chip selects are "active high" in
* the logical sense, the gpiolib will invert the line if need be.
*/
if (ctlr->use_gpio_descriptors)
spi->mode |= SPI_CS_HIGH;
else if (of_property_read_bool(nc, "spi-cs-high"))
spi->mode |= SPI_CS_HIGH;
/* Device DUAL/QUAD mode */
if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
switch (value) {
@ -2137,6 +2160,60 @@ static int of_spi_register_master(struct spi_controller *ctlr)
}
#endif
/**
* spi_get_gpio_descs() - grab chip select GPIOs for the master
* @ctlr: The SPI master to grab GPIO descriptors for
*/
static int spi_get_gpio_descs(struct spi_controller *ctlr)
{
int nb, i;
struct gpio_desc **cs;
struct device *dev = &ctlr->dev;
nb = gpiod_count(dev, "cs");
ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect);
/* No GPIOs at all is fine, else return the error */
if (nb == 0 || nb == -ENOENT)
return 0;
else if (nb < 0)
return nb;
cs = devm_kcalloc(dev, ctlr->num_chipselect, sizeof(*cs),
GFP_KERNEL);
if (!cs)
return -ENOMEM;
ctlr->cs_gpiods = cs;
for (i = 0; i < nb; i++) {
/*
* Most chipselects are active low, the inverted
* semantics are handled by special quirks in gpiolib,
* so initializing them GPIOD_OUT_LOW here means
* "unasserted", in most cases this will drive the physical
* line high.
*/
cs[i] = devm_gpiod_get_index_optional(dev, "cs", i,
GPIOD_OUT_LOW);
if (cs[i]) {
/*
* If we find a CS GPIO, name it after the device and
* chip select line.
*/
char *gpioname;
gpioname = devm_kasprintf(dev, GFP_KERNEL, "%s CS%d",
dev_name(dev), i);
if (!gpioname)
return -ENOMEM;
gpiod_set_consumer_name(cs[i], gpioname);
}
}
return 0;
}
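A sketch (hypothetical driver, not part of this series) of how a controller opts in to the descriptor handling done by spi_get_gpio_descs() above:

	ctlr->use_gpio_descriptors = true;	/* core fetches "cs-gpios" as GPIO descriptors */
	ctlr->num_chipselect = 1;		/* raised to gpiod_count() if DT lists more     */
	ret = devm_spi_register_controller(&pdev->dev, ctlr);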
static int spi_controller_check_ops(struct spi_controller *ctlr)
{
/*
@ -2199,9 +2276,21 @@ int spi_register_controller(struct spi_controller *ctlr)
return status;
if (!spi_controller_is_slave(ctlr)) {
status = of_spi_register_master(ctlr);
if (status)
return status;
if (ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
return status;
/*
* A controller using GPIO descriptors always
* supports SPI_CS_HIGH if need be.
*/
ctlr->mode_bits |= SPI_CS_HIGH;
} else {
/* Legacy code path for GPIOs from DT */
status = of_spi_register_master(ctlr);
if (status)
return status;
}
}
/* even if it's just one always-selected device, there must
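For context (not part of this commit): a hedged sketch of how a controller driver opts in, using a hypothetical "foo" platform driver; only use_gpio_descriptors and the registration call matter here.

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

static int foo_spi_probe(struct platform_device *pdev)
{
	struct spi_controller *ctlr;
	int ret;

	ctlr = spi_alloc_master(&pdev->dev, 0);
	if (!ctlr)
		return -ENOMEM;

	/* Ask the core to fill ctlr->cs_gpiods from the "cs-gpios" property */
	ctlr->use_gpio_descriptors = true;
	ctlr->dev.of_node = pdev->dev.of_node;
	/* ... mode_bits, bits_per_word_mask, transfer_one, etc. ... */

	ret = devm_spi_register_controller(&pdev->dev, ctlr);
	if (ret)
		spi_controller_put(ctlr);
	return ret;
}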
@ -2915,6 +3004,7 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* cs_change is set for each transfer.
*/
if ((spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
spi->cs_gpiod ||
gpio_is_valid(spi->cs_gpio))) {
size_t maxsize;
int ret;
@ -2961,6 +3051,8 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
* it is not set for this transfer.
* Set transfer tx_nbits and rx_nbits as single transfer default
* (SPI_NBITS_SINGLE) if it is not set for this transfer.
* Ensure transfer word_delay is at least as long as that required by
* the device itself.
*/
message->frame_length = 0;
list_for_each_entry(xfer, &message->transfers, transfer_list) {
@ -3031,6 +3123,9 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
!(spi->mode & SPI_RX_QUAD))
return -EINVAL;
}
if (xfer->word_delay_usecs < spi->word_delay_usecs)
xfer->word_delay_usecs = spi->word_delay_usecs;
}
message->status = -EINPROGRESS;
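Not in the patch: a sketch of a client that relies on this behaviour, assuming a hypothetical helper that reads 16-bit words. When the controller lacks native SPI_CS_WORD support or the chip select is a GPIO, the validation above splits the transfer into one-word pieces so CS still toggles between words; the new word_delay_usecs clamp likewise guarantees at least the per-device delay.

#include <linux/spi/spi.h>

/* Hypothetical: read n 16-bit words, toggling CS between words. */
static int foo_read_words(struct spi_device *spi, __be16 *buf, unsigned int n)
{
	struct spi_transfer xfer = {
		.rx_buf = buf,
		.len = n * sizeof(*buf),
		.bits_per_word = 16,
	};
	int ret;

	spi->mode |= SPI_CS_WORD;
	ret = spi_setup(spi);
	if (ret)
		return ret;

	return spi_sync_transfer(spi, &xfer, 1);
}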

View File

@ -22,7 +22,7 @@
struct dma_chan;
/* device.platform_data for SSP controller devices */
struct pxa2xx_spi_master {
struct pxa2xx_spi_controller {
u16 num_chipselect;
u8 enable_dma;
bool is_slave;
@ -54,7 +54,7 @@ struct pxa2xx_spi_chip {
#include <linux/clk.h>
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info);
extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info);
#endif
#endif
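A usage sketch for the rename (board name and values are made up): legacy platform code keeps the same fields, only the struct name changes.

#include <linux/init.h>
#include <linux/spi/pxa2xx_spi.h>

static struct pxa2xx_spi_controller foo_board_spi1_info = {
	.num_chipselect	= 1,
	.enable_dma	= 1,
};

static void __init foo_board_init_spi(void)
{
	pxa2xx_set_spi_info(1, &foo_board_spi1_info);
}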

View File

@ -330,6 +330,11 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, void *buf);
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
u64 offs, size_t len, const void *buf);
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
const struct spi_mem_dirmap_info *info);
void devm_spi_mem_dirmap_destroy(struct device *dev,
struct spi_mem_dirmap_desc *desc);
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
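To illustrate the new devm_ variants (a sketch only: the opcode, the device size and the probe body are assumptions, and error handling is trimmed):

#include <linux/spi/spi-mem.h>

static int foo_mem_probe(struct spi_mem *mem)
{
	struct spi_mem_dirmap_info info = {
		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
				      SPI_MEM_OP_ADDR(3, 0, 1),
				      SPI_MEM_OP_NO_DUMMY,
				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
		.offset = 0,
		.length = 0x1000000,	/* assumed 16 MiB device */
	};
	struct spi_mem_dirmap_desc *desc;

	/* Released automatically when the driver unbinds */
	desc = devm_spi_mem_dirmap_create(&mem->spi->dev, mem, &info);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	/* ... later: spi_mem_dirmap_read(desc, offs, len, buf); ... */
	return 0;
}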

View File

@ -12,6 +12,7 @@
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/gpio/consumer.h>
struct dma_chan;
struct property_entry;
@ -116,8 +117,13 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
* @modalias: Name of the driver to use with this device, or an alias
* for that name. This appears in the sysfs "modalias" attribute
* for driver coldplugging, and in uevents used for hotplugging
* @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when
* @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
* not using a GPIO line); use cs_gpiod in new drivers by opting in on
* the spi_controller.
* @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
* not using a GPIO line)
* @word_delay_usecs: microsecond delay to be inserted between consecutive
* words of a transfer
*
* @statistics: statistics for the spi_device
*
@ -163,7 +169,9 @@ struct spi_device {
void *controller_data;
char modalias[SPI_NAME_SIZE];
const char *driver_override;
int cs_gpio; /* chip select gpio */
int cs_gpio; /* LEGACY: chip select gpio */
struct gpio_desc *cs_gpiod; /* chip select gpio desc */
uint8_t word_delay_usecs; /* inter-word delay */
/* the statistics */
struct spi_statistics statistics;
@ -376,9 +384,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* controller has native support for memory like operations.
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
* @cs_gpios: LEGACY: array of GPIO numbers to use as chip select lines; one per
* CS number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself). Use cs_gpiods
* in new drivers.
* @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
* number. Any individual value may be NULL for CS lines that
* are not GPIOs (driven by the SPI controller itself).
* @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
* GPIO descriptors rather than using global GPIO numbers grabbed by the
* driver. This will fill in @cs_gpiods; @cs_gpios should not be used,
* and SPI devices will have cs_gpiod assigned rather than cs_gpio.
* @statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
@ -557,6 +573,8 @@ struct spi_controller {
/* gpio chip select */
int *cs_gpios;
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
/* statistics */
struct spi_statistics statistics;
@ -706,6 +724,8 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
* @word_delay_usecs: microseconds of delay inserted between consecutive words
* of a transfer (word size set by bits_per_word).
* @word_delay: clock cycles of delay inserted between consecutive words
* of a transfer (word size set by bits_per_word).
* @transfer_list: transfers are sequenced through @spi_message.transfers
@ -788,6 +808,7 @@ struct spi_transfer {
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
u8 bits_per_word;
u8 word_delay_usecs;
u16 delay_usecs;
u32 speed_hz;
u16 word_delay;
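A brief, hypothetical usage sketch of the per-transfer field: the delay is given in microseconds, and __spi_validate() (see the core change earlier) raises it to at least the per-device spi->word_delay_usecs.

#include <linux/spi/spi.h>

/* Hypothetical: transmit with a 5 us pause between consecutive 16-bit words */
static int foo_send_slow(struct spi_device *spi, const void *buf, size_t len)
{
	struct spi_transfer xfer = {
		.tx_buf = buf,
		.len = len,
		.bits_per_word = 16,
		.word_delay_usecs = 5,
	};

	return spi_sync_transfer(spi, &xfer, 1);
}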

View File

@ -109,6 +109,16 @@ TRACE_EVENT(spi_message_done,
(unsigned)__entry->actual, (unsigned)__entry->frame)
);
/*
* Consider a buffer valid if it is non-NULL and does not match the dummy
* buffer that only exists to work with controllers that have
* SPI_CONTROLLER_MUST_TX or SPI_CONTROLLER_MUST_RX.
*/
#define spi_valid_txbuf(msg, xfer) \
(xfer->tx_buf && xfer->tx_buf != msg->spi->controller->dummy_tx)
#define spi_valid_rxbuf(msg, xfer) \
(xfer->rx_buf && xfer->rx_buf != msg->spi->controller->dummy_rx)
DECLARE_EVENT_CLASS(spi_transfer,
TP_PROTO(struct spi_message *msg, struct spi_transfer *xfer),
@ -120,6 +130,10 @@ DECLARE_EVENT_CLASS(spi_transfer,
__field( int, chip_select )
__field( struct spi_transfer *, xfer )
__field( int, len )
__dynamic_array(u8, rx_buf,
spi_valid_rxbuf(msg, xfer) ? xfer->len : 0)
__dynamic_array(u8, tx_buf,
spi_valid_txbuf(msg, xfer) ? xfer->len : 0)
),
TP_fast_assign(
@ -127,12 +141,21 @@ DECLARE_EVENT_CLASS(spi_transfer,
__entry->chip_select = msg->spi->chip_select;
__entry->xfer = xfer;
__entry->len = xfer->len;
if (spi_valid_txbuf(msg, xfer))
memcpy(__get_dynamic_array(tx_buf),
xfer->tx_buf, xfer->len);
if (spi_valid_rxbuf(msg, xfer))
memcpy(__get_dynamic_array(rx_buf),
xfer->rx_buf, xfer->len);
),
TP_printk("spi%d.%d %p len=%d", (int)__entry->bus_num,
(int)__entry->chip_select,
(struct spi_message *)__entry->xfer,
(int)__entry->len)
TP_printk("spi%d.%d %p len=%d tx=[%*phD] rx=[%*phD]",
__entry->bus_num, __entry->chip_select,
__entry->xfer, __entry->len,
__get_dynamic_array_len(tx_buf), __get_dynamic_array(tx_buf),
__get_dynamic_array_len(rx_buf), __get_dynamic_array(rx_buf))
);
DEFINE_EVENT(spi_transfer, spi_transfer_start,