Merge tag 'mmc-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc

Pull MMC updates from Ulf Hansson:

 "MMC core:
   - Fix a few memory leaks
   - Minor improvements to the card initialization sequence
   - Partially support sleepy GPIO controllers for pwrseq eMMC

  MMC host:
   - alcor: Work with multiple-entry sglists
   - alcor: Enable DMA for writes
   - meson-gx: Improve tuning support
   - meson-gx: Avoid clock glitch when switching to DDR modes
   - meson-gx: Disable unreliable HS400 mode
   - mmci: Minor updates for support of HW busy detection
   - mmci: Support data transfers for the stm32_sdmmc variant
   - mmci: Restructure code to better support different variants
   - mtk-sd: Add support for version found on MT7620 family SOCs
   - mtk-sd: Add support for the MT8516 version
   - mtk-sd: Add Chaotian Jing as the maintainer
   - sdhci: Reorganize request-code to convert from tasklet to workqueue
   - sdhci_am654: Stabilize support for lower speed modes
   - sdhci-esdhc-imx: Add HS400 support for iMX7ULP
   - sdhci-esdhc-imx: Add support for iMX7ULP version
   - sdhci-of-arasan: Allow to disable DCMDs via DT for CQE
   - sdhci-of-esdhc: Add support for the ls1028a version
   - sdhci-of-esdhc: Several fixups for errata
   - sdhci-pci: Fix BYT OCP setting
   - sdhci-pci: Add support for Intel CML
   - sdhci-tegra: Add support for system suspend/resume
   - sdhci-tegra: Add CQE support for Tegra186 WAR
   - sdhci-tegra: Add support for Tegra194
   - sdhci-tegra: Update HW tuning process

  MEMSTICK:
   - I volunteered to help as a maintainer for the memstick subsystem,
     which is reflected by an update to the MAINTAINERS file. Changes are
     funneled through my MMC git and we will use the linux-mmc mailing list.

  MEMSTICK host:
   - A few minor cleanups"

* tag 'mmc-v5.2' of git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc: (87 commits)
  mmc: sdhci-pci: Fix BYT OCP setting
  dt-bindings: mmc: add DT bindings for ls1028a eSDHC host controller
  mmc: alcor: Drop pointer to mmc_host from alcor_sdmmc_host
  mmc: mtk-sd: select REGULATOR
  mmc: mtk-sd: enable internal card-detect logic.
  mmc: mtk-sd: add support for config found in mt7620 family SOCs.
  mmc: mtk-sd: don't hard-code interrupt trigger type
  mmc: core: Fix tag set memory leak
  dt-bindings: mmc: Add support for MT8516 to mtk-sd
  mmc: mmci: Prevent polling for busy detection in IRQ context
  mmc: mmci: Cleanup mmci_cmd_irq() for busy detect
  mmc: usdhi6rol0: mark expected switch fall-throughs
  mmc: core: Verify SD bus width
  mmc: sdhci-esdhc-imx: Add HS400 support for iMX7ULP
  mmc: sdhci-esdhc-imx: add pm_qos to interact with cpuidle
  dt-bindings: mmc: fsl-imx-esdhc: add imx7ulp compatible string
  mmc: meson-gx: add signal resampling tuning
  mmc: meson-gx: remove Rx phase tuning
  mmc: meson-gx: avoid clock glitch when switching to DDR modes
  mmc: meson-gx: disable HS400
  ...
commit 01e5d1830c
Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -17,6 +17,7 @@ Required properties:
       "fsl,t4240-esdhc"
   Possible compatibles for ARM:
       "fsl,ls1012a-esdhc"
+      "fsl,ls1028a-esdhc"
       "fsl,ls1088a-esdhc"
       "fsl,ls1043a-esdhc"
       "fsl,ls1046a-esdhc"
Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,6 +17,7 @@ Required properties:
       "fsl,imx6sx-usdhc"
       "fsl,imx6ull-usdhc"
       "fsl,imx7d-usdhc"
+      "fsl,imx7ulp-usdhc"
       "fsl,imx8qxp-usdhc"
 
 Optional properties:
Documentation/devicetree/bindings/mmc/mmc.txt
@@ -64,6 +64,8 @@ Optional properties:
   whether pwrseq-simple is used. Default to 10ms if no available.
 - supports-cqe : The presence of this property indicates that the corresponding
   MMC host controller supports HW command queue feature.
+- disable-cqe-dcmd: This property indicates that the MMC controller's command
+  queue engine (CQE) does not support direct commands (DCMDs).
 
 *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line
 polarity properties, we have to fix the meaning of the "normal" and "inverted"
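For illustration only, a hedged sketch of how a CQE-capable host driver could consume the two properties documented above (the helper name example_parse_cqe_properties is made up; device_property_read_bool(), MMC_CAP2_CQE and MMC_CAP2_CQE_DCMD are existing kernel interfaces):

#include <linux/device.h>
#include <linux/mmc/host.h>
#include <linux/property.h>

/* Sketch: map the DT properties onto the core's CQE capability flags. */
static void example_parse_cqe_properties(struct device *dev,
					 struct mmc_host *mmc)
{
	if (device_property_read_bool(dev, "supports-cqe"))
		mmc->caps2 |= MMC_CAP2_CQE;

	/* Only advertise DCMDs when the CQE is enabled and supports them. */
	if ((mmc->caps2 & MMC_CAP2_CQE) &&
	    !device_property_read_bool(dev, "disable-cqe-dcmd"))
		mmc->caps2 |= MMC_CAP2_CQE_DCMD;
}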
Documentation/devicetree/bindings/mmc/mtk-sd.txt
@@ -11,10 +11,12 @@ Required properties:
 	"mediatek,mt8135-mmc": for mmc host ip compatible with mt8135
 	"mediatek,mt8173-mmc": for mmc host ip compatible with mt8173
 	"mediatek,mt8183-mmc": for mmc host ip compatible with mt8183
+	"mediatek,mt8516-mmc": for mmc host ip compatible with mt8516
 	"mediatek,mt2701-mmc": for mmc host ip compatible with mt2701
 	"mediatek,mt2712-mmc": for mmc host ip compatible with mt2712
 	"mediatek,mt7622-mmc": for MT7622 SoC
 	"mediatek,mt7623-mmc", "mediatek,mt2701-mmc": for MT7623 SoC
+	"mediatek,mt7620-mmc", for MT7621 SoC (and others)
 
 - reg: physical base address of the controller and length
 - interrupts: Should contain MSDC interrupt number
Documentation/devicetree/bindings/mmc/nvidia,tegra20-sdhci.txt
@@ -14,6 +14,7 @@ Required properties:
   - "nvidia,tegra124-sdhci": for Tegra124 and Tegra132
   - "nvidia,tegra210-sdhci": for Tegra210
   - "nvidia,tegra186-sdhci": for Tegra186
+  - "nvidia,tegra194-sdhci": for Tegra194
 - clocks : Must contain one entry, for the module clock.
   See ../clocks/clock-bindings.txt for details.
 - resets : Must contain an entry for each entry in reset-names.
MAINTAINERS (25 changed lines)
@@ -9785,6 +9785,12 @@ F: drivers/media/platform/mtk-vpu/
 F:	Documentation/devicetree/bindings/media/mediatek-vcodec.txt
 F:	Documentation/devicetree/bindings/media/mediatek-vpu.txt
 
+MEDIATEK MMC/SD/SDIO DRIVER
+M:	Chaotian Jing <chaotian.jing@mediatek.com>
+S:	Maintained
+F:	drivers/mmc/host/mtk-sd.c
+F:	Documentation/devicetree/bindings/mmc/mtk-sd.txt
+
 MEDIATEK MT76 WIRELESS LAN DRIVER
 M:	Felix Fietkau <nbd@nbd.name>
 M:	Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
@@ -14484,16 +14490,15 @@ T: git git://linuxtv.org/media_tree.git
 S:	Maintained
 F:	drivers/media/i2c/imx355.c
 
-SONY MEMORYSTICK CARD SUPPORT
-M:	Alex Dubov <oakad@yahoo.com>
-W:	http://tifmxx.berlios.de/
-S:	Maintained
-F:	drivers/memstick/host/tifm_ms.c
-
-SONY MEMORYSTICK STANDARD SUPPORT
+SONY MEMORYSTICK SUBSYSTEM
 M:	Maxim Levitsky <maximlevitsky@gmail.com>
+M:	Alex Dubov <oakad@yahoo.com>
+M:	Ulf Hansson <ulf.hansson@linaro.org>
+L:	linux-mmc@vger.kernel.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
 S:	Maintained
-F:	drivers/memstick/core/ms_block.*
+F:	drivers/memstick/
+F:	include/linux/memstick.h
 
 SONY VAIO CONTROL DEVICE DRIVER
 M:	Mattia Dongili <malattia@linux.it>
@@ -15518,9 +15523,11 @@ S: Maintained
 F:	drivers/net/ethernet/ti/cpsw*
 F:	drivers/net/ethernet/ti/davinci*
 
-TI FLASH MEDIA INTERFACE DRIVER
+TI FLASH MEDIA MEMORYSTICK/MMC DRIVERS
 M:	Alex Dubov <oakad@yahoo.com>
 S:	Maintained
+W:	http://tifmxx.berlios.de/
+F:	drivers/memstick/host/tifm_ms.c
 F:	drivers/misc/tifm*
 F:	drivers/mmc/host/tifm_sd.c
 F:	include/linux/tifm.h
@ -370,7 +370,6 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
|
|||
static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
|
||||
{
|
||||
struct jmb38x_ms_host *host = memstick_priv(msh);
|
||||
unsigned char *data;
|
||||
unsigned int data_len, cmd, t_val;
|
||||
|
||||
if (!(STATUS_HAS_MEDIA & readl(host->addr + STATUS))) {
|
||||
|
@ -402,8 +401,6 @@ static int jmb38x_ms_issue_cmd(struct memstick_host *msh)
|
|||
cmd |= TPC_WAIT_INT;
|
||||
}
|
||||
|
||||
data = host->req->data;
|
||||
|
||||
if (!no_dma)
|
||||
host->cmd_flags |= DMA_DATA;
|
||||
|
||||
|
|
|
@ -256,7 +256,6 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
|
|||
static int tifm_ms_issue_cmd(struct tifm_ms *host)
|
||||
{
|
||||
struct tifm_dev *sock = host->dev;
|
||||
unsigned char *data;
|
||||
unsigned int data_len, cmd, sys_param;
|
||||
|
||||
host->cmd_flags = 0;
|
||||
|
@ -265,8 +264,6 @@ static int tifm_ms_issue_cmd(struct tifm_ms *host)
|
|||
host->io_word = 0;
|
||||
host->cmd_flags = 0;
|
||||
|
||||
data = host->req->data;
|
||||
|
||||
host->use_dma = !no_dma;
|
||||
|
||||
if (host->req->long_data) {
|
||||
|
|
|
@ -363,11 +363,11 @@ int mmc_of_parse_voltage(struct device_node *np, u32 *mask)
|
|||
int num_ranges, i;
|
||||
|
||||
voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges);
|
||||
num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
|
||||
if (!voltage_ranges) {
|
||||
pr_debug("%pOF: voltage-ranges unspecified\n", np);
|
||||
return 0;
|
||||
}
|
||||
num_ranges = num_ranges / sizeof(*voltage_ranges) / 2;
|
||||
if (!num_ranges) {
|
||||
pr_err("%pOF: voltage-ranges empty\n", np);
|
||||
return -EINVAL;
|
||||
|
@ -429,8 +429,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
|
|||
|
||||
if (mmc_gpio_alloc(host)) {
|
||||
put_device(&host->class_dev);
|
||||
ida_simple_remove(&mmc_host_ida, host->index);
|
||||
kfree(host);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
|
|
|
@ -184,11 +184,7 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
|
|||
if (err)
|
||||
break;
|
||||
|
||||
/* if we're just probing, do a single pass */
|
||||
if (ocr == 0)
|
||||
break;
|
||||
|
||||
/* otherwise wait until reset completes */
|
||||
/* wait until reset completes */
|
||||
if (mmc_host_is_spi(host)) {
|
||||
if (!(cmd.resp[0] & R1_SPI_IDLE))
|
||||
break;
|
||||
|
@ -200,6 +196,16 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)
|
|||
err = -ETIMEDOUT;
|
||||
|
||||
mmc_delay(10);
|
||||
|
||||
/*
|
||||
* According to eMMC specification v5.1 section 6.4.3, we
|
||||
* should issue CMD1 repeatedly in the idle state until
|
||||
* the eMMC is ready. Otherwise some eMMC devices seem to enter
|
||||
* the inactive mode after mmc_init_card() issued CMD0 when
|
||||
* the eMMC device is busy.
|
||||
*/
|
||||
if (!ocr && !mmc_host_is_spi(host))
|
||||
cmd.arg = cmd.resp[0] | BIT(30);
|
||||
}
|
||||
|
||||
if (rocr && !mmc_host_is_spi(host))
|
||||
|
|
|
@ -30,19 +30,14 @@ struct mmc_pwrseq_emmc {
|
|||
|
||||
#define to_pwrseq_emmc(p) container_of(p, struct mmc_pwrseq_emmc, pwrseq)
|
||||
|
||||
static void __mmc_pwrseq_emmc_reset(struct mmc_pwrseq_emmc *pwrseq)
|
||||
{
|
||||
gpiod_set_value(pwrseq->reset_gpio, 1);
|
||||
udelay(1);
|
||||
gpiod_set_value(pwrseq->reset_gpio, 0);
|
||||
udelay(200);
|
||||
}
|
||||
|
||||
static void mmc_pwrseq_emmc_reset(struct mmc_host *host)
|
||||
{
|
||||
struct mmc_pwrseq_emmc *pwrseq = to_pwrseq_emmc(host->pwrseq);
|
||||
|
||||
__mmc_pwrseq_emmc_reset(pwrseq);
|
||||
gpiod_set_value_cansleep(pwrseq->reset_gpio, 1);
|
||||
udelay(1);
|
||||
gpiod_set_value_cansleep(pwrseq->reset_gpio, 0);
|
||||
udelay(200);
|
||||
}
|
||||
|
||||
static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
|
||||
|
@ -50,8 +45,11 @@ static int mmc_pwrseq_emmc_reset_nb(struct notifier_block *this,
|
|||
{
|
||||
struct mmc_pwrseq_emmc *pwrseq = container_of(this,
|
||||
struct mmc_pwrseq_emmc, reset_nb);
|
||||
gpiod_set_value(pwrseq->reset_gpio, 1);
|
||||
udelay(1);
|
||||
gpiod_set_value(pwrseq->reset_gpio, 0);
|
||||
udelay(200);
|
||||
|
||||
__mmc_pwrseq_emmc_reset(pwrseq);
|
||||
return NOTIFY_DONE;
|
||||
}
|
||||
|
||||
|
@ -72,6 +70,7 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
|
|||
if (IS_ERR(pwrseq->reset_gpio))
|
||||
return PTR_ERR(pwrseq->reset_gpio);
|
||||
|
||||
if (!gpiod_cansleep(pwrseq->reset_gpio)) {
|
||||
/*
|
||||
* register reset handler to ensure emmc reset also from
|
||||
* emergency_reboot(), priority 255 is the highest priority
|
||||
|
@ -80,6 +79,9 @@ static int mmc_pwrseq_emmc_probe(struct platform_device *pdev)
|
|||
pwrseq->reset_nb.notifier_call = mmc_pwrseq_emmc_reset_nb;
|
||||
pwrseq->reset_nb.priority = 255;
|
||||
register_restart_handler(&pwrseq->reset_nb);
|
||||
} else {
|
||||
dev_notice(dev, "EMMC reset pin tied to a sleepy GPIO driver; reset on emergency-reboot disabled\n");
|
||||
}
|
||||
|
||||
pwrseq->pwrseq.ops = &mmc_pwrseq_emmc_ops;
|
||||
pwrseq->pwrseq.dev = dev;
|
||||
|
|
|
@ -472,6 +472,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
|
|||
blk_mq_unquiesce_queue(q);
|
||||
|
||||
blk_cleanup_queue(q);
|
||||
blk_mq_free_tag_set(&mq->tag_set);
|
||||
|
||||
/*
|
||||
* A request can be completed before the next request, potentially
|
||||
|
|
|
@ -221,6 +221,14 @@ static int mmc_decode_scr(struct mmc_card *card)
|
|||
|
||||
if (scr->sda_spec3)
|
||||
scr->cmds = UNSTUFF_BITS(resp, 32, 2);
|
||||
|
||||
/* SD Spec says: any SD Card shall set at least bits 0 and 2 */
|
||||
if (!(scr->bus_widths & SD_SCR_BUS_WIDTH_1) ||
|
||||
!(scr->bus_widths & SD_SCR_BUS_WIDTH_4)) {
|
||||
pr_err("%s: invalid bus width\n", mmc_hostname(card->host));
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
|
|
@ -92,6 +92,7 @@ config MMC_SDHCI_PCI
|
|||
tristate "SDHCI support on PCI bus"
|
||||
depends on MMC_SDHCI && PCI
|
||||
select MMC_CQHCI
|
||||
select IOSF_MBI if X86
|
||||
help
|
||||
This selects the PCI Secure Digital Host Controller Interface.
|
||||
Most controllers found today are PCI devices.
|
||||
|
@ -941,6 +942,7 @@ config MMC_BCM2835
|
|||
config MMC_MTK
|
||||
tristate "MediaTek SD/MMC Card Interface support"
|
||||
depends on HAS_DMA
|
||||
select REGULATOR
|
||||
help
|
||||
This selects the MediaTek(R) Secure digital and Multimedia card Interface.
|
||||
If you have a machine with a integrated SD/MMC card reader, say Y or M here.
|
||||
|
@ -957,6 +959,7 @@ config MMC_SDHCI_MICROCHIP_PIC32
|
|||
If you have a controller with this interface, say Y or M here.
|
||||
|
||||
If unsure, say N.
|
||||
|
||||
config MMC_SDHCI_BRCMSTB
|
||||
tristate "Broadcom SDIO/SD/MMC support"
|
||||
depends on ARCH_BRCMSTB || BMIPS_GENERIC
|
||||
|
@ -993,6 +996,7 @@ config MMC_SDHCI_OMAP
|
|||
config MMC_SDHCI_AM654
|
||||
tristate "Support for the SDHCI Controller in TI's AM654 SOCs"
|
||||
depends on MMC_SDHCI_PLTFM && OF
|
||||
select MMC_SDHCI_IO_ACCESSORS
|
||||
help
|
||||
This selects the Secure Digital Host Controller Interface (SDHCI)
|
||||
support present in TI's AM654 SOCs. The controller supports
|
||||
|
|
|
@ -43,7 +43,6 @@ struct alcor_sdmmc_host {
|
|||
struct device *dev;
|
||||
struct alcor_pci_priv *alcor_pci;
|
||||
|
||||
struct mmc_host *mmc;
|
||||
struct mmc_request *mrq;
|
||||
struct mmc_command *cmd;
|
||||
struct mmc_data *data;
|
||||
|
@ -117,6 +116,9 @@ static void alcor_reset(struct alcor_sdmmc_host *host, u8 val)
|
|||
dev_err(host->dev, "%s: timeout\n", __func__);
|
||||
}
|
||||
|
||||
/*
|
||||
* Perform DMA I/O of a single page.
|
||||
*/
|
||||
static void alcor_data_set_dma(struct alcor_sdmmc_host *host)
|
||||
{
|
||||
struct alcor_pci_priv *priv = host->alcor_pci;
|
||||
|
@ -153,12 +155,26 @@ static void alcor_trigger_data_transfer(struct alcor_sdmmc_host *host)
|
|||
ctrl |= AU6601_DATA_WRITE;
|
||||
|
||||
if (data->host_cookie == COOKIE_MAPPED) {
|
||||
/*
|
||||
* For DMA transfers, this function is called just once,
|
||||
* at the start of the operation. The hardware can only
|
||||
* perform DMA I/O on a single page at a time, so here
|
||||
* we kick off the transfer with the first page, and expect
|
||||
* subsequent pages to be transferred upon IRQ events
|
||||
* indicating that the single-page DMA was completed.
|
||||
*/
|
||||
alcor_data_set_dma(host);
|
||||
ctrl |= AU6601_DATA_DMA_MODE;
|
||||
host->dma_on = 1;
|
||||
alcor_write32(priv, data->sg_count * 0x1000,
|
||||
AU6601_REG_BLOCK_SIZE);
|
||||
} else {
|
||||
/*
|
||||
* For PIO transfers, we break down each operation
|
||||
* into several sector-sized transfers. When one sector has
|
||||
* complete, the IRQ handler will call this function again
|
||||
* to kick off the transfer of the next sector.
|
||||
*/
|
||||
alcor_write32(priv, data->blksz, AU6601_REG_BLOCK_SIZE);
|
||||
}
|
||||
|
||||
|
@ -276,7 +292,7 @@ static void alcor_send_cmd(struct alcor_sdmmc_host *host,
|
|||
break;
|
||||
default:
|
||||
dev_err(host->dev, "%s: cmd->flag (0x%02x) is not valid\n",
|
||||
mmc_hostname(host->mmc), mmc_resp_type(cmd));
|
||||
mmc_hostname(mmc_from_priv(host)), mmc_resp_type(cmd));
|
||||
break;
|
||||
}
|
||||
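Several alcor hunks here replace the driver's cached host->mmc pointer with the core's mmc_from_priv() helper. For reference, a sketch of the shape of such a helper, assuming the driver-private area is embedded at the tail of struct mmc_host as the core does (the real helper lives in include/linux/mmc/host.h):

#include <linux/kernel.h>
#include <linux/mmc/host.h>

/* Recover the mmc_host from the driver-private area it embeds. */
static inline struct mmc_host *example_mmc_from_priv(void *priv)
{
	return container_of(priv, struct mmc_host, private);
}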
|
||||
|
@ -317,7 +333,7 @@ static void alcor_request_complete(struct alcor_sdmmc_host *host,
|
|||
host->data = NULL;
|
||||
host->dma_on = 0;
|
||||
|
||||
mmc_request_done(host->mmc, mrq);
|
||||
mmc_request_done(mmc_from_priv(host), mrq);
|
||||
}
|
||||
|
||||
static void alcor_finish_data(struct alcor_sdmmc_host *host)
|
||||
|
@ -547,7 +563,7 @@ static void alcor_cd_irq(struct alcor_sdmmc_host *host, u32 intmask)
|
|||
alcor_request_complete(host, 1);
|
||||
}
|
||||
|
||||
mmc_detect_change(host->mmc, msecs_to_jiffies(1));
|
||||
mmc_detect_change(mmc_from_priv(host), msecs_to_jiffies(1));
|
||||
}
|
||||
|
||||
static irqreturn_t alcor_irq_thread(int irq, void *d)
|
||||
|
@ -771,12 +787,17 @@ static void alcor_pre_req(struct mmc_host *mmc,
|
|||
data->host_cookie = COOKIE_UNMAPPED;
|
||||
|
||||
/* FIXME: looks like the DMA engine works only with CMD18 */
|
||||
if (cmd->opcode != 18)
|
||||
if (cmd->opcode != MMC_READ_MULTIPLE_BLOCK
|
||||
&& cmd->opcode != MMC_WRITE_MULTIPLE_BLOCK)
|
||||
return;
|
||||
/*
|
||||
* We don't do DMA on "complex" transfers, i.e. with
|
||||
* non-word-aligned buffers or lengths. Also, we don't bother
|
||||
* with all the DMA setup overhead for short transfers.
|
||||
* non-word-aligned buffers or lengths. A future improvement
|
||||
* could be made to use temporary DMA bounce-buffers when these
|
||||
* requirements are not met.
|
||||
*
|
||||
* Also, we don't bother with all the DMA setup overhead for
|
||||
* short transfers.
|
||||
*/
|
||||
if (data->blocks * data->blksz < AU6601_MAX_DMA_BLOCK_SIZE)
|
||||
return;
|
||||
|
@ -787,6 +808,8 @@ static void alcor_pre_req(struct mmc_host *mmc,
|
|||
for_each_sg(data->sg, sg, data->sg_len, i) {
|
||||
if (sg->length != AU6601_MAX_DMA_BLOCK_SIZE)
|
||||
return;
|
||||
if (sg->offset != 0)
|
||||
return;
|
||||
}
|
||||
|
||||
/* This data might be unmapped at this time */
|
||||
|
@ -1024,7 +1047,7 @@ static void alcor_hw_uninit(struct alcor_sdmmc_host *host)
|
|||
|
||||
static void alcor_init_mmc(struct alcor_sdmmc_host *host)
|
||||
{
|
||||
struct mmc_host *mmc = host->mmc;
|
||||
struct mmc_host *mmc = mmc_from_priv(host);
|
||||
|
||||
mmc->f_min = AU6601_MIN_CLOCK;
|
||||
mmc->f_max = AU6601_MAX_CLOCK;
|
||||
|
@ -1036,26 +1059,21 @@ static void alcor_init_mmc(struct alcor_sdmmc_host *host)
|
|||
mmc->ops = &alcor_sdc_ops;
|
||||
|
||||
/* The hardware does DMA data transfer of 4096 bytes to/from a single
|
||||
* buffer address. Scatterlists are not supported, but upon DMA
|
||||
* completion (signalled via IRQ), the original vendor driver does
|
||||
* then immediately set up another DMA transfer of the next 4096
|
||||
* bytes.
|
||||
* buffer address. Scatterlists are not supported at the hardware
|
||||
* level, however we can work with them at the driver level,
|
||||
* provided that each segment is exactly 4096 bytes in size.
|
||||
* Upon DMA completion of a single segment (signalled via IRQ), we
|
||||
* immediately proceed to transfer the next segment from the
|
||||
* scatterlist.
|
||||
*
|
||||
* This means that we need to handle the I/O in 4096 byte chunks.
|
||||
* Lacking a way to limit the sglist entries to 4096 bytes, we instead
|
||||
* impose that only one segment is provided, with maximum size 4096,
|
||||
* which also happens to be the minimum size. This means that the
|
||||
* single-entry sglist handled by this driver can be handed directly
|
||||
* to the hardware, nice and simple.
|
||||
*
|
||||
* Unfortunately though, that means we only do 4096 bytes I/O per
|
||||
* MMC command. A future improvement would be to make the driver
|
||||
* accept sg lists and entries of any size, and simply iterate
|
||||
* through them 4096 bytes at a time.
|
||||
* The overall request is limited to 240 sectors, matching the
|
||||
* original vendor driver.
|
||||
*/
|
||||
mmc->max_segs = AU6601_MAX_DMA_SEGMENTS;
|
||||
mmc->max_seg_size = AU6601_MAX_DMA_BLOCK_SIZE;
|
||||
mmc->max_req_size = mmc->max_seg_size;
|
||||
mmc->max_blk_count = 240;
|
||||
mmc->max_req_size = mmc->max_blk_count * mmc->max_blk_size;
|
||||
dma_set_max_seg_size(host->dev, mmc->max_seg_size);
|
||||
}
|
||||
|
||||
static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
|
||||
|
@ -1072,7 +1090,6 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
host = mmc_priv(mmc);
|
||||
host->mmc = mmc;
|
||||
host->dev = &pdev->dev;
|
||||
host->cur_power_mode = MMC_POWER_UNDEFINED;
|
||||
host->alcor_pci = priv;
|
||||
|
@ -1104,13 +1121,14 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
|
|||
static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
|
||||
{
|
||||
struct alcor_sdmmc_host *host = dev_get_drvdata(&pdev->dev);
|
||||
struct mmc_host *mmc = mmc_from_priv(host);
|
||||
|
||||
if (cancel_delayed_work_sync(&host->timeout_work))
|
||||
alcor_request_complete(host, 0);
|
||||
|
||||
alcor_hw_uninit(host);
|
||||
mmc_remove_host(host->mmc);
|
||||
mmc_free_host(host->mmc);
|
||||
mmc_remove_host(mmc);
|
||||
mmc_free_host(mmc);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
|
|
@ -537,6 +537,8 @@ static void cqhci_prep_dcmd_desc(struct mmc_host *mmc,
|
|||
CQHCI_ACT(0x5) |
|
||||
CQHCI_CMD_INDEX(mrq->cmd->opcode) |
|
||||
CQHCI_CMD_TIMING(timing) | CQHCI_RESP_TYPE(resp_type));
|
||||
if (cq_host->ops->update_dcmd_desc)
|
||||
cq_host->ops->update_dcmd_desc(mmc, mrq, &data);
|
||||
*task_desc |= data;
|
||||
desc = (u8 *)task_desc;
|
||||
pr_debug("%s: cqhci: dcmd: cmd: %d timing: %d resp: %d\n",
|
||||
|
|
|
@ -88,6 +88,7 @@
|
|||
|
||||
/* send status config 1 */
|
||||
#define CQHCI_SSC1 0x40
|
||||
#define CQHCI_SSC1_CBC_MASK GENMASK(19, 16)
|
||||
|
||||
/* send status config 2 */
|
||||
#define CQHCI_SSC2 0x44
|
||||
|
@ -147,6 +148,7 @@
|
|||
|
||||
struct cqhci_host_ops;
|
||||
struct mmc_host;
|
||||
struct mmc_request;
|
||||
struct cqhci_slot;
|
||||
|
||||
struct cqhci_host {
|
||||
|
@ -210,6 +212,8 @@ struct cqhci_host_ops {
|
|||
u32 (*read_l)(struct cqhci_host *host, int reg);
|
||||
void (*enable)(struct mmc_host *mmc);
|
||||
void (*disable)(struct mmc_host *mmc, bool recovery);
|
||||
void (*update_dcmd_desc)(struct mmc_host *mmc, struct mmc_request *mrq,
|
||||
u64 *data);
|
||||
};
|
||||
|
||||
static inline void cqhci_writel(struct cqhci_host *host, u32 val, int reg)
|
||||
|
|
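The update_dcmd_desc hook added above lets a host driver patch the DCMD task descriptor built by cqhci_prep_dcmd_desc() before it is written out. A hedged illustration of wiring it up (the callback name and the busy-response condition are illustrative; sdhci-tegra hooks this as part of its Tegra186 workaround in this merge, but not necessarily with this exact logic):

#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include "cqhci.h"

/* Example only: force the DCMD timing bit for busy-signalling commands. */
static void example_update_dcmd_desc(struct mmc_host *mmc,
				     struct mmc_request *mrq, u64 *data)
{
	if (mrq->cmd->flags & MMC_RSP_BUSY)
		*data |= CQHCI_CMD_TIMING(1);
}

static const struct cqhci_host_ops example_cqhci_ops = {
	.update_dcmd_desc = example_update_dcmd_desc,
};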
|
@ -23,6 +23,7 @@
|
|||
#include <linux/init.h>
|
||||
#include <linux/delay.h>
|
||||
#include <linux/device.h>
|
||||
#include <linux/iopoll.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/ioport.h>
|
||||
|
@ -48,6 +49,8 @@
|
|||
#define CLK_CORE_PHASE_MASK GENMASK(9, 8)
|
||||
#define CLK_TX_PHASE_MASK GENMASK(11, 10)
|
||||
#define CLK_RX_PHASE_MASK GENMASK(13, 12)
|
||||
#define CLK_PHASE_0 0
|
||||
#define CLK_PHASE_180 2
|
||||
#define CLK_V2_TX_DELAY_MASK GENMASK(19, 16)
|
||||
#define CLK_V2_RX_DELAY_MASK GENMASK(23, 20)
|
||||
#define CLK_V2_ALWAYS_ON BIT(24)
|
||||
|
@ -56,10 +59,6 @@
|
|||
#define CLK_V3_RX_DELAY_MASK GENMASK(27, 22)
|
||||
#define CLK_V3_ALWAYS_ON BIT(28)
|
||||
|
||||
#define CLK_DELAY_STEP_PS 200
|
||||
#define CLK_PHASE_STEP 30
|
||||
#define CLK_PHASE_POINT_NUM (360 / CLK_PHASE_STEP)
|
||||
|
||||
#define CLK_TX_DELAY_MASK(h) (h->data->tx_delay_mask)
|
||||
#define CLK_RX_DELAY_MASK(h) (h->data->rx_delay_mask)
|
||||
#define CLK_ALWAYS_ON(h) (h->data->always_on)
|
||||
|
@ -164,10 +163,10 @@ struct meson_host {
|
|||
|
||||
void __iomem *regs;
|
||||
struct clk *core_clk;
|
||||
struct clk *mux_clk;
|
||||
struct clk *mmc_clk;
|
||||
struct clk *rx_clk;
|
||||
struct clk *tx_clk;
|
||||
unsigned long req_rate;
|
||||
bool ddr;
|
||||
|
||||
struct pinctrl *pinctrl;
|
||||
struct pinctrl_state *pins_default;
|
||||
|
@ -207,90 +206,6 @@ struct meson_host {
|
|||
#define CMD_RESP_MASK GENMASK(31, 1)
|
||||
#define CMD_RESP_SRAM BIT(0)
|
||||
|
||||
struct meson_mmc_phase {
|
||||
struct clk_hw hw;
|
||||
void __iomem *reg;
|
||||
unsigned long phase_mask;
|
||||
unsigned long delay_mask;
|
||||
unsigned int delay_step_ps;
|
||||
};
|
||||
|
||||
#define to_meson_mmc_phase(_hw) container_of(_hw, struct meson_mmc_phase, hw)
|
||||
|
||||
static int meson_mmc_clk_get_phase(struct clk_hw *hw)
|
||||
{
|
||||
struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
|
||||
unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
|
||||
unsigned long period_ps, p, d;
|
||||
int degrees;
|
||||
u32 val;
|
||||
|
||||
val = readl(mmc->reg);
|
||||
p = (val & mmc->phase_mask) >> __ffs(mmc->phase_mask);
|
||||
degrees = p * 360 / phase_num;
|
||||
|
||||
if (mmc->delay_mask) {
|
||||
period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
|
||||
clk_get_rate(hw->clk));
|
||||
d = (val & mmc->delay_mask) >> __ffs(mmc->delay_mask);
|
||||
degrees += d * mmc->delay_step_ps * 360 / period_ps;
|
||||
degrees %= 360;
|
||||
}
|
||||
|
||||
return degrees;
|
||||
}
|
||||
|
||||
static void meson_mmc_apply_phase_delay(struct meson_mmc_phase *mmc,
|
||||
unsigned int phase,
|
||||
unsigned int delay)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
val = readl(mmc->reg);
|
||||
val &= ~mmc->phase_mask;
|
||||
val |= phase << __ffs(mmc->phase_mask);
|
||||
|
||||
if (mmc->delay_mask) {
|
||||
val &= ~mmc->delay_mask;
|
||||
val |= delay << __ffs(mmc->delay_mask);
|
||||
}
|
||||
|
||||
writel(val, mmc->reg);
|
||||
}
|
||||
|
||||
static int meson_mmc_clk_set_phase(struct clk_hw *hw, int degrees)
|
||||
{
|
||||
struct meson_mmc_phase *mmc = to_meson_mmc_phase(hw);
|
||||
unsigned int phase_num = 1 << hweight_long(mmc->phase_mask);
|
||||
unsigned long period_ps, d = 0, r;
|
||||
uint64_t p;
|
||||
|
||||
p = degrees % 360;
|
||||
|
||||
if (!mmc->delay_mask) {
|
||||
p = DIV_ROUND_CLOSEST_ULL(p, 360 / phase_num);
|
||||
} else {
|
||||
period_ps = DIV_ROUND_UP((unsigned long)NSEC_PER_SEC * 1000,
|
||||
clk_get_rate(hw->clk));
|
||||
|
||||
/* First compute the phase index (p), the remainder (r) is the
|
||||
* part we'll try to acheive using the delays (d).
|
||||
*/
|
||||
r = do_div(p, 360 / phase_num);
|
||||
d = DIV_ROUND_CLOSEST(r * period_ps,
|
||||
360 * mmc->delay_step_ps);
|
||||
d = min(d, mmc->delay_mask >> __ffs(mmc->delay_mask));
|
||||
}
|
||||
|
||||
meson_mmc_apply_phase_delay(mmc, p, d);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct clk_ops meson_mmc_clk_phase_ops = {
|
||||
.get_phase = meson_mmc_clk_get_phase,
|
||||
.set_phase = meson_mmc_clk_set_phase,
|
||||
};
|
||||
|
||||
static unsigned int meson_mmc_get_timeout_msecs(struct mmc_data *data)
|
||||
{
|
||||
unsigned int timeout = data->timeout_ns / NSEC_PER_MSEC;
|
||||
|
@ -383,16 +298,6 @@ static void meson_mmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
|
|||
mmc_get_dma_dir(data));
|
||||
}
|
||||
|
||||
static bool meson_mmc_timing_is_ddr(struct mmc_ios *ios)
|
||||
{
|
||||
if (ios->timing == MMC_TIMING_MMC_DDR52 ||
|
||||
ios->timing == MMC_TIMING_UHS_DDR50 ||
|
||||
ios->timing == MMC_TIMING_MMC_HS400)
|
||||
return true;
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Gating the clock on this controller is tricky. It seems the mmc clock
|
||||
* is also used by the controller. It may crash during some operation if the
|
||||
|
@ -429,36 +334,41 @@ static void meson_mmc_clk_ungate(struct meson_host *host)
|
|||
writel(cfg, host->regs + SD_EMMC_CFG);
|
||||
}
|
||||
|
||||
static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios)
|
||||
static int meson_mmc_clk_set(struct meson_host *host, unsigned long rate,
|
||||
bool ddr)
|
||||
{
|
||||
struct mmc_host *mmc = host->mmc;
|
||||
unsigned long rate = ios->clock;
|
||||
int ret;
|
||||
u32 cfg;
|
||||
|
||||
/* DDR modes require higher module clock */
|
||||
if (meson_mmc_timing_is_ddr(ios))
|
||||
rate <<= 1;
|
||||
|
||||
/* Same request - bail-out */
|
||||
if (host->req_rate == rate)
|
||||
if (host->ddr == ddr && host->req_rate == rate)
|
||||
return 0;
|
||||
|
||||
/* stop clock */
|
||||
meson_mmc_clk_gate(host);
|
||||
host->req_rate = 0;
|
||||
|
||||
if (!rate) {
|
||||
mmc->actual_clock = 0;
|
||||
|
||||
/* return with clock being stopped */
|
||||
if (!rate)
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Stop the clock during rate change to avoid glitches */
|
||||
cfg = readl(host->regs + SD_EMMC_CFG);
|
||||
cfg |= CFG_STOP_CLOCK;
|
||||
writel(cfg, host->regs + SD_EMMC_CFG);
|
||||
|
||||
if (ddr) {
|
||||
/* DDR modes require higher module clock */
|
||||
rate <<= 1;
|
||||
cfg |= CFG_DDR;
|
||||
} else {
|
||||
cfg &= ~CFG_DDR;
|
||||
}
|
||||
writel(cfg, host->regs + SD_EMMC_CFG);
|
||||
host->ddr = ddr;
|
||||
|
||||
ret = clk_set_rate(host->mmc_clk, rate);
|
||||
if (ret) {
|
||||
dev_err(host->dev, "Unable to set cfg_div_clk to %lu. ret=%d\n",
|
||||
|
@ -470,12 +380,14 @@ static int meson_mmc_clk_set(struct meson_host *host, struct mmc_ios *ios)
|
|||
mmc->actual_clock = clk_get_rate(host->mmc_clk);
|
||||
|
||||
/* We should report the real output frequency of the controller */
|
||||
if (meson_mmc_timing_is_ddr(ios))
|
||||
if (ddr) {
|
||||
host->req_rate >>= 1;
|
||||
mmc->actual_clock >>= 1;
|
||||
}
|
||||
|
||||
dev_dbg(host->dev, "clk rate: %u Hz\n", mmc->actual_clock);
|
||||
if (ios->clock != mmc->actual_clock)
|
||||
dev_dbg(host->dev, "requested rate was %u\n", ios->clock);
|
||||
if (rate != mmc->actual_clock)
|
||||
dev_dbg(host->dev, "requested rate was %lu\n", rate);
|
||||
|
||||
/* (re)start clock */
|
||||
meson_mmc_clk_ungate(host);
|
||||
|
@ -493,8 +405,6 @@ static int meson_mmc_clk_init(struct meson_host *host)
|
|||
struct clk_init_data init;
|
||||
struct clk_mux *mux;
|
||||
struct clk_divider *div;
|
||||
struct meson_mmc_phase *core, *tx, *rx;
|
||||
struct clk *clk;
|
||||
char clk_name[32];
|
||||
int i, ret = 0;
|
||||
const char *mux_parent_names[MUX_CLK_NUM_PARENTS];
|
||||
|
@ -502,9 +412,11 @@ static int meson_mmc_clk_init(struct meson_host *host)
|
|||
u32 clk_reg;
|
||||
|
||||
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
|
||||
clk_reg = 0;
|
||||
clk_reg |= CLK_ALWAYS_ON(host);
|
||||
clk_reg = CLK_ALWAYS_ON(host);
|
||||
clk_reg |= CLK_DIV_MASK;
|
||||
clk_reg |= FIELD_PREP(CLK_CORE_PHASE_MASK, CLK_PHASE_180);
|
||||
clk_reg |= FIELD_PREP(CLK_TX_PHASE_MASK, CLK_PHASE_0);
|
||||
clk_reg |= FIELD_PREP(CLK_RX_PHASE_MASK, CLK_PHASE_0);
|
||||
writel(clk_reg, host->regs + SD_EMMC_CLOCK);
|
||||
|
||||
/* get the mux parents */
|
||||
|
@ -540,9 +452,9 @@ static int meson_mmc_clk_init(struct meson_host *host)
|
|||
mux->mask = CLK_SRC_MASK >> mux->shift;
|
||||
mux->hw.init = &init;
|
||||
|
||||
clk = devm_clk_register(host->dev, &mux->hw);
|
||||
if (WARN_ON(IS_ERR(clk)))
|
||||
return PTR_ERR(clk);
|
||||
host->mux_clk = devm_clk_register(host->dev, &mux->hw);
|
||||
if (WARN_ON(IS_ERR(host->mux_clk)))
|
||||
return PTR_ERR(host->mux_clk);
|
||||
|
||||
/* create the divider */
|
||||
div = devm_kzalloc(host->dev, sizeof(*div), GFP_KERNEL);
|
||||
|
@ -553,7 +465,7 @@ static int meson_mmc_clk_init(struct meson_host *host)
|
|||
init.name = clk_name;
|
||||
init.ops = &clk_divider_ops;
|
||||
init.flags = CLK_SET_RATE_PARENT;
|
||||
clk_parent[0] = __clk_get_name(clk);
|
||||
clk_parent[0] = __clk_get_name(host->mux_clk);
|
||||
init.parent_names = clk_parent;
|
||||
init.num_parents = 1;
|
||||
|
||||
|
@ -563,190 +475,104 @@ static int meson_mmc_clk_init(struct meson_host *host)
|
|||
div->hw.init = &init;
|
||||
div->flags = CLK_DIVIDER_ONE_BASED;
|
||||
|
||||
clk = devm_clk_register(host->dev, &div->hw);
|
||||
if (WARN_ON(IS_ERR(clk)))
|
||||
return PTR_ERR(clk);
|
||||
|
||||
/* create the mmc core clock */
|
||||
core = devm_kzalloc(host->dev, sizeof(*core), GFP_KERNEL);
|
||||
if (!core)
|
||||
return -ENOMEM;
|
||||
|
||||
snprintf(clk_name, sizeof(clk_name), "%s#core", dev_name(host->dev));
|
||||
init.name = clk_name;
|
||||
init.ops = &meson_mmc_clk_phase_ops;
|
||||
init.flags = CLK_SET_RATE_PARENT;
|
||||
clk_parent[0] = __clk_get_name(clk);
|
||||
init.parent_names = clk_parent;
|
||||
init.num_parents = 1;
|
||||
|
||||
core->reg = host->regs + SD_EMMC_CLOCK;
|
||||
core->phase_mask = CLK_CORE_PHASE_MASK;
|
||||
core->hw.init = &init;
|
||||
|
||||
host->mmc_clk = devm_clk_register(host->dev, &core->hw);
|
||||
if (WARN_ON(PTR_ERR_OR_ZERO(host->mmc_clk)))
|
||||
host->mmc_clk = devm_clk_register(host->dev, &div->hw);
|
||||
if (WARN_ON(IS_ERR(host->mmc_clk)))
|
||||
return PTR_ERR(host->mmc_clk);
|
||||
|
||||
/* create the mmc tx clock */
|
||||
tx = devm_kzalloc(host->dev, sizeof(*tx), GFP_KERNEL);
|
||||
if (!tx)
|
||||
return -ENOMEM;
|
||||
|
||||
snprintf(clk_name, sizeof(clk_name), "%s#tx", dev_name(host->dev));
|
||||
init.name = clk_name;
|
||||
init.ops = &meson_mmc_clk_phase_ops;
|
||||
init.flags = 0;
|
||||
clk_parent[0] = __clk_get_name(host->mmc_clk);
|
||||
init.parent_names = clk_parent;
|
||||
init.num_parents = 1;
|
||||
|
||||
tx->reg = host->regs + SD_EMMC_CLOCK;
|
||||
tx->phase_mask = CLK_TX_PHASE_MASK;
|
||||
tx->delay_mask = CLK_TX_DELAY_MASK(host);
|
||||
tx->delay_step_ps = CLK_DELAY_STEP_PS;
|
||||
tx->hw.init = &init;
|
||||
|
||||
host->tx_clk = devm_clk_register(host->dev, &tx->hw);
|
||||
if (WARN_ON(PTR_ERR_OR_ZERO(host->tx_clk)))
|
||||
return PTR_ERR(host->tx_clk);
|
||||
|
||||
/* create the mmc rx clock */
|
||||
rx = devm_kzalloc(host->dev, sizeof(*rx), GFP_KERNEL);
|
||||
if (!rx)
|
||||
return -ENOMEM;
|
||||
|
||||
snprintf(clk_name, sizeof(clk_name), "%s#rx", dev_name(host->dev));
|
||||
init.name = clk_name;
|
||||
init.ops = &meson_mmc_clk_phase_ops;
|
||||
init.flags = 0;
|
||||
clk_parent[0] = __clk_get_name(host->mmc_clk);
|
||||
init.parent_names = clk_parent;
|
||||
init.num_parents = 1;
|
||||
|
||||
rx->reg = host->regs + SD_EMMC_CLOCK;
|
||||
rx->phase_mask = CLK_RX_PHASE_MASK;
|
||||
rx->delay_mask = CLK_RX_DELAY_MASK(host);
|
||||
rx->delay_step_ps = CLK_DELAY_STEP_PS;
|
||||
rx->hw.init = &init;
|
||||
|
||||
host->rx_clk = devm_clk_register(host->dev, &rx->hw);
|
||||
if (WARN_ON(PTR_ERR_OR_ZERO(host->rx_clk)))
|
||||
return PTR_ERR(host->rx_clk);
|
||||
|
||||
/* init SD_EMMC_CLOCK to sane defaults w/min clock rate */
|
||||
host->mmc->f_min = clk_round_rate(host->mmc_clk, 400000);
|
||||
ret = clk_set_rate(host->mmc_clk, host->mmc->f_min);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
clk_set_phase(host->mmc_clk, 180);
|
||||
clk_set_phase(host->tx_clk, 0);
|
||||
clk_set_phase(host->rx_clk, 0);
|
||||
|
||||
return clk_prepare_enable(host->mmc_clk);
|
||||
}
|
||||
|
||||
static void meson_mmc_shift_map(unsigned long *map, unsigned long shift)
|
||||
static void meson_mmc_disable_resampling(struct meson_host *host)
|
||||
{
|
||||
DECLARE_BITMAP(left, CLK_PHASE_POINT_NUM);
|
||||
DECLARE_BITMAP(right, CLK_PHASE_POINT_NUM);
|
||||
unsigned int val = readl(host->regs + host->data->adjust);
|
||||
|
||||
/*
|
||||
* shift the bitmap right and reintroduce the dropped bits on the left
|
||||
* of the bitmap
|
||||
*/
|
||||
bitmap_shift_right(right, map, shift, CLK_PHASE_POINT_NUM);
|
||||
bitmap_shift_left(left, map, CLK_PHASE_POINT_NUM - shift,
|
||||
CLK_PHASE_POINT_NUM);
|
||||
bitmap_or(map, left, right, CLK_PHASE_POINT_NUM);
|
||||
val &= ~ADJUST_ADJ_EN;
|
||||
writel(val, host->regs + host->data->adjust);
|
||||
}
|
||||
|
||||
static void meson_mmc_find_next_region(unsigned long *map,
|
||||
unsigned long *start,
|
||||
unsigned long *stop)
|
||||
static void meson_mmc_reset_resampling(struct meson_host *host)
|
||||
{
|
||||
*start = find_next_bit(map, CLK_PHASE_POINT_NUM, *start);
|
||||
*stop = find_next_zero_bit(map, CLK_PHASE_POINT_NUM, *start);
|
||||
unsigned int val;
|
||||
|
||||
meson_mmc_disable_resampling(host);
|
||||
|
||||
val = readl(host->regs + host->data->adjust);
|
||||
val &= ~ADJUST_ADJ_DELAY_MASK;
|
||||
writel(val, host->regs + host->data->adjust);
|
||||
}
|
||||
|
||||
static int meson_mmc_find_tuning_point(unsigned long *test)
|
||||
{
|
||||
unsigned long shift, stop, offset = 0, start = 0, size = 0;
|
||||
|
||||
/* Get the all good/all bad situation out the way */
|
||||
if (bitmap_full(test, CLK_PHASE_POINT_NUM))
|
||||
return 0; /* All points are good so point 0 will do */
|
||||
else if (bitmap_empty(test, CLK_PHASE_POINT_NUM))
|
||||
return -EIO; /* No successful tuning point */
|
||||
|
||||
/*
|
||||
* Now we know there is a least one region find. Make sure it does
|
||||
* not wrap by the shifting the bitmap if necessary
|
||||
*/
|
||||
shift = find_first_zero_bit(test, CLK_PHASE_POINT_NUM);
|
||||
if (shift != 0)
|
||||
meson_mmc_shift_map(test, shift);
|
||||
|
||||
while (start < CLK_PHASE_POINT_NUM) {
|
||||
meson_mmc_find_next_region(test, &start, &stop);
|
||||
|
||||
if ((stop - start) > size) {
|
||||
offset = start;
|
||||
size = stop - start;
|
||||
}
|
||||
|
||||
start = stop;
|
||||
}
|
||||
|
||||
/* Get the center point of the region */
|
||||
offset += (size / 2);
|
||||
|
||||
/* Shift the result back */
|
||||
offset = (offset + shift) % CLK_PHASE_POINT_NUM;
|
||||
|
||||
return offset;
|
||||
}
|
||||
|
||||
static int meson_mmc_clk_phase_tuning(struct mmc_host *mmc, u32 opcode,
|
||||
struct clk *clk)
|
||||
{
|
||||
int point, ret;
|
||||
DECLARE_BITMAP(test, CLK_PHASE_POINT_NUM);
|
||||
|
||||
dev_dbg(mmc_dev(mmc), "%s phase/delay tunning...\n",
|
||||
__clk_get_name(clk));
|
||||
bitmap_zero(test, CLK_PHASE_POINT_NUM);
|
||||
|
||||
/* Explore tuning points */
|
||||
for (point = 0; point < CLK_PHASE_POINT_NUM; point++) {
|
||||
clk_set_phase(clk, point * CLK_PHASE_STEP);
|
||||
ret = mmc_send_tuning(mmc, opcode, NULL);
|
||||
if (!ret)
|
||||
set_bit(point, test);
|
||||
}
|
||||
|
||||
/* Find the optimal tuning point and apply it */
|
||||
point = meson_mmc_find_tuning_point(test);
|
||||
if (point < 0)
|
||||
return point; /* tuning failed */
|
||||
|
||||
clk_set_phase(clk, point * CLK_PHASE_STEP);
|
||||
dev_dbg(mmc_dev(mmc), "success with phase: %d\n",
|
||||
clk_get_phase(clk));
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int meson_mmc_execute_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
static int meson_mmc_resampling_tuning(struct mmc_host *mmc, u32 opcode)
|
||||
{
|
||||
struct meson_host *host = mmc_priv(mmc);
|
||||
int adj = 0;
|
||||
unsigned int val, dly, max_dly, i;
|
||||
int ret;
|
||||
|
||||
/* enable signal resampling w/o delay */
|
||||
adj = ADJUST_ADJ_EN;
|
||||
writel(adj, host->regs + host->data->adjust);
|
||||
/* Resampling is done using the source clock */
|
||||
max_dly = DIV_ROUND_UP(clk_get_rate(host->mux_clk),
|
||||
clk_get_rate(host->mmc_clk));
|
||||
|
||||
return meson_mmc_clk_phase_tuning(mmc, opcode, host->rx_clk);
|
||||
val = readl(host->regs + host->data->adjust);
|
||||
val |= ADJUST_ADJ_EN;
|
||||
writel(val, host->regs + host->data->adjust);
|
||||
|
||||
if (mmc->doing_retune)
|
||||
dly = FIELD_GET(ADJUST_ADJ_DELAY_MASK, val) + 1;
|
||||
else
|
||||
dly = 0;
|
||||
|
||||
for (i = 0; i < max_dly; i++) {
|
||||
val &= ~ADJUST_ADJ_DELAY_MASK;
|
||||
val |= FIELD_PREP(ADJUST_ADJ_DELAY_MASK, (dly + i) % max_dly);
|
||||
writel(val, host->regs + host->data->adjust);
|
||||
|
||||
ret = mmc_send_tuning(mmc, opcode, NULL);
|
||||
if (!ret) {
|
||||
dev_dbg(mmc_dev(mmc), "resampling delay: %u\n",
|
||||
(dly + i) % max_dly);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
meson_mmc_reset_resampling(host);
|
||||
return -EIO;
|
||||
}
|
||||
|
||||
static int meson_mmc_prepare_ios_clock(struct meson_host *host,
|
||||
struct mmc_ios *ios)
|
||||
{
|
||||
bool ddr;
|
||||
|
||||
switch (ios->timing) {
|
||||
case MMC_TIMING_MMC_DDR52:
|
||||
case MMC_TIMING_UHS_DDR50:
|
||||
ddr = true;
|
||||
break;
|
||||
|
||||
default:
|
||||
ddr = false;
|
||||
break;
|
||||
}
|
||||
|
||||
return meson_mmc_clk_set(host, ios->clock, ddr);
|
||||
}
|
||||
|
||||
static void meson_mmc_check_resampling(struct meson_host *host,
|
||||
struct mmc_ios *ios)
|
||||
{
|
||||
switch (ios->timing) {
|
||||
case MMC_TIMING_LEGACY:
|
||||
case MMC_TIMING_MMC_HS:
|
||||
case MMC_TIMING_SD_HS:
|
||||
case MMC_TIMING_MMC_DDR52:
|
||||
meson_mmc_disable_resampling(host);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
||||
|
@ -775,12 +601,6 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|||
if (!IS_ERR(mmc->supply.vmmc))
|
||||
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
|
||||
|
||||
/* disable signal resampling */
|
||||
writel(0, host->regs + host->data->adjust);
|
||||
|
||||
/* Reset rx phase */
|
||||
clk_set_phase(host->rx_clk, 0);
|
||||
|
||||
break;
|
||||
|
||||
case MMC_POWER_ON:
|
||||
|
@ -817,20 +637,13 @@ static void meson_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|||
val = readl(host->regs + SD_EMMC_CFG);
|
||||
val &= ~CFG_BUS_WIDTH_MASK;
|
||||
val |= FIELD_PREP(CFG_BUS_WIDTH_MASK, bus_width);
|
||||
writel(val, host->regs + SD_EMMC_CFG);
|
||||
|
||||
val &= ~CFG_DDR;
|
||||
if (meson_mmc_timing_is_ddr(ios))
|
||||
val |= CFG_DDR;
|
||||
|
||||
val &= ~CFG_CHK_DS;
|
||||
if (ios->timing == MMC_TIMING_MMC_HS400)
|
||||
val |= CFG_CHK_DS;
|
||||
|
||||
err = meson_mmc_clk_set(host, ios);
|
||||
meson_mmc_check_resampling(host, ios);
|
||||
err = meson_mmc_prepare_ios_clock(host, ios);
|
||||
if (err)
|
||||
dev_err(host->dev, "Failed to set clock: %d\n,", err);
|
||||
|
||||
writel(val, host->regs + SD_EMMC_CFG);
|
||||
dev_dbg(host->dev, "SD_EMMC_CFG: 0x%08x\n", val);
|
||||
}
|
||||
|
||||
|
@ -1081,9 +894,6 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
|
|||
}
|
||||
|
||||
out:
|
||||
/* ack all enabled interrupts */
|
||||
writel(irq_en, host->regs + SD_EMMC_STATUS);
|
||||
|
||||
if (cmd->error) {
|
||||
/* Stop desc in case of errors */
|
||||
u32 start = readl(host->regs + SD_EMMC_START);
|
||||
|
@ -1095,12 +905,14 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
|
|||
if (ret == IRQ_HANDLED)
|
||||
meson_mmc_request_done(host->mmc, cmd->mrq);
|
||||
|
||||
/* ack all raised interrupts */
|
||||
writel(status, host->regs + SD_EMMC_STATUS);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int meson_mmc_wait_desc_stop(struct meson_host *host)
|
||||
{
|
||||
int loop;
|
||||
u32 status;
|
||||
|
||||
/*
|
||||
|
@ -1110,20 +922,10 @@ static int meson_mmc_wait_desc_stop(struct meson_host *host)
|
|||
* If we don't confirm the descriptor is stopped, it might raise new
|
||||
* IRQs after we have called mmc_request_done() which is bad.
|
||||
*/
|
||||
for (loop = 50; loop; loop--) {
|
||||
status = readl(host->regs + SD_EMMC_STATUS);
|
||||
if (status & (STATUS_BUSY | STATUS_DESC_BUSY))
|
||||
udelay(100);
|
||||
else
|
||||
break;
|
||||
}
|
||||
|
||||
if (status & (STATUS_BUSY | STATUS_DESC_BUSY)) {
|
||||
dev_err(host->dev, "Timed out waiting for host to stop\n");
|
||||
return -ETIMEDOUT;
|
||||
}
|
||||
|
||||
return 0;
|
||||
return readl_poll_timeout(host->regs + SD_EMMC_STATUS, status,
|
||||
!(status & (STATUS_BUSY | STATUS_DESC_BUSY)),
|
||||
100, 5000);
|
||||
}
|
||||
|
||||
static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
|
||||
|
@ -1227,7 +1029,7 @@ static const struct mmc_host_ops meson_mmc_ops = {
|
|||
.get_cd = meson_mmc_get_cd,
|
||||
.pre_req = meson_mmc_pre_req,
|
||||
.post_req = meson_mmc_post_req,
|
||||
.execute_tuning = meson_mmc_execute_tuning,
|
||||
.execute_tuning = meson_mmc_resampling_tuning,
|
||||
.card_busy = meson_mmc_card_busy,
|
||||
.start_signal_voltage_switch = meson_mmc_voltage_switch,
|
||||
};
|
||||
|
@ -1338,7 +1140,7 @@ static int meson_mmc_probe(struct platform_device *pdev)
|
|||
host->regs + SD_EMMC_IRQ_EN);
|
||||
|
||||
ret = request_threaded_irq(host->irq, meson_mmc_irq,
|
||||
meson_mmc_irq_thread, IRQF_SHARED,
|
||||
meson_mmc_irq_thread, IRQF_ONESHOT,
|
||||
dev_name(&pdev->dev), host);
|
||||
if (ret)
|
||||
goto err_init_clk;
|
||||
|
@ -1349,6 +1151,13 @@ static int meson_mmc_probe(struct platform_device *pdev)
|
|||
mmc->max_segs = SD_EMMC_DESC_BUF_LEN / sizeof(struct sd_emmc_desc);
|
||||
mmc->max_seg_size = mmc->max_req_size;
|
||||
|
||||
/*
|
||||
* At the moment, we don't know how to reliably enable HS400.
|
||||
* From the different datasheets, it is not even clear if this mode
|
||||
* is officially supported by any of the SoCs
|
||||
*/
|
||||
mmc->caps2 &= ~MMC_CAP2_HS400;
|
||||
|
||||
/* data bounce buffer */
|
||||
host->bounce_buf_size = mmc->max_req_size;
|
||||
host->bounce_buf =
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// SPDX-License-Identifier: GPL-2.0-or-later
|
||||
/*
|
||||
* mmc_spi.c - Access SD/MMC cards through SPI master controllers
|
||||
* Access SD/MMC cards through SPI master controllers
|
||||
*
|
||||
* (C) Copyright 2005, Intec Automation,
|
||||
* Mike Lavender (mike@steroidmicros)
|
||||
|
@ -8,21 +9,6 @@
|
|||
* Hans-Peter Nilsson (hp@axis.com)
|
||||
* (C) Copyright 2007, ATRON electronic GmbH,
|
||||
* Jan Nikitenko <jan.nikitenko@gmail.com>
|
||||
*
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License as published by
|
||||
* the Free Software Foundation; either version 2 of the License, or
|
||||
* (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, write to the Free Software
|
||||
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
|
||||
*/
|
||||
#include <linux/sched.h>
|
||||
#include <linux/delay.h>
|
||||
|
@ -220,7 +206,7 @@ static int mmc_spi_skip(struct mmc_spi_host *host, unsigned long timeout,
|
|||
* We use jiffies here because we want to have a relation
|
||||
* between elapsed time and the blocking of the scheduler.
|
||||
*/
|
||||
if (time_is_before_jiffies(start+1))
|
||||
if (time_is_before_jiffies(start + 1))
|
||||
schedule();
|
||||
}
|
||||
return -ETIMEDOUT;
|
||||
|
@ -467,8 +453,8 @@ mmc_spi_command_send(struct mmc_spi_host *host,
|
|||
memset(cp, 0xff, sizeof(data->status));
|
||||
|
||||
cp[1] = 0x40 | cmd->opcode;
|
||||
put_unaligned_be32(cmd->arg, cp+2);
|
||||
cp[6] = crc7_be(0, cp+1, 5) | 0x01;
|
||||
put_unaligned_be32(cmd->arg, cp + 2);
|
||||
cp[6] = crc7_be(0, cp + 1, 5) | 0x01;
|
||||
cp += 7;
|
||||
|
||||
/* Then, read up to 13 bytes (while writing all-ones):
|
||||
|
@ -642,9 +628,7 @@ mmc_spi_setup_data_message(
|
|||
if (multiple || direction == DMA_TO_DEVICE) {
|
||||
t = &host->early_status;
|
||||
memset(t, 0, sizeof(*t));
|
||||
t->len = (direction == DMA_TO_DEVICE)
|
||||
? sizeof(scratch->status)
|
||||
: 1;
|
||||
t->len = (direction == DMA_TO_DEVICE) ? sizeof(scratch->status) : 1;
|
||||
t->tx_buf = host->ones;
|
||||
t->tx_dma = host->ones_dma;
|
||||
t->rx_buf = scratch->status;
|
||||
|
@ -677,8 +661,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,
|
|||
u32 pattern;
|
||||
|
||||
if (host->mmc->use_spi_crc)
|
||||
scratch->crc_val = cpu_to_be16(
|
||||
crc_itu_t(0, t->tx_buf, t->len));
|
||||
scratch->crc_val = cpu_to_be16(crc_itu_t(0, t->tx_buf, t->len));
|
||||
if (host->dma_dev)
|
||||
dma_sync_single_for_device(host->dma_dev,
|
||||
host->data_dma, sizeof(*scratch),
|
||||
|
@ -819,6 +802,10 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
|
|||
}
|
||||
|
||||
status = spi_sync_locked(spi, &host->m);
|
||||
if (status < 0) {
|
||||
dev_dbg(&spi->dev, "read error %d\n", status);
|
||||
return status;
|
||||
}
|
||||
|
||||
if (host->dma_dev) {
|
||||
dma_sync_single_for_cpu(host->dma_dev,
|
||||
|
@ -855,8 +842,8 @@ mmc_spi_readblock(struct mmc_spi_host *host, struct spi_transfer *t,
|
|||
|
||||
be16_to_cpus(&scratch->crc_val);
|
||||
if (scratch->crc_val != crc) {
|
||||
dev_dbg(&spi->dev, "read - crc error: crc_val=0x%04x, "
|
||||
"computed=0x%04x len=%d\n",
|
||||
dev_dbg(&spi->dev,
|
||||
"read - crc error: crc_val=0x%04x, computed=0x%04x len=%d\n",
|
||||
scratch->crc_val, crc, t->len);
|
||||
return -EILSEQ;
|
||||
}
|
||||
|
@ -945,9 +932,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
|
|||
|
||||
dev_dbg(&host->spi->dev,
|
||||
" mmc_spi: %s block, %d bytes\n",
|
||||
(direction == DMA_TO_DEVICE)
|
||||
? "write"
|
||||
: "read",
|
||||
(direction == DMA_TO_DEVICE) ? "write" : "read",
|
||||
t->len);
|
||||
|
||||
if (direction == DMA_TO_DEVICE)
|
||||
|
@ -974,8 +959,7 @@ mmc_spi_data_do(struct mmc_spi_host *host, struct mmc_command *cmd,
|
|||
if (status < 0) {
|
||||
data->error = status;
|
||||
dev_dbg(&spi->dev, "%s status %d\n",
|
||||
(direction == DMA_TO_DEVICE)
|
||||
? "write" : "read",
|
||||
(direction == DMA_TO_DEVICE) ? "write" : "read",
|
||||
status);
|
||||
break;
|
||||
}
|
||||
|
@ -1249,8 +1233,7 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
|
|||
mres = spi_setup(host->spi);
|
||||
if (mres < 0)
|
||||
dev_dbg(&host->spi->dev,
|
||||
"switch back to SPI mode 3"
|
||||
" failed\n");
|
||||
"switch back to SPI mode 3 failed\n");
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1470,7 +1453,7 @@ static int mmc_spi_probe(struct spi_device *spi)
|
|||
return 0;
|
||||
|
||||
fail_add_host:
|
||||
mmc_remove_host (mmc);
|
||||
mmc_remove_host(mmc);
|
||||
fail_glue_init:
|
||||
if (host->dma_dev)
|
||||
dma_unmap_single(host->dma_dev, host->data_dma,
|
||||
|
@ -1485,7 +1468,6 @@ static int mmc_spi_probe(struct spi_device *spi)
|
|||
fail_nobuf1:
|
||||
mmc_free_host(mmc);
|
||||
mmc_spi_put_pdata(spi);
|
||||
dev_set_drvdata(&spi->dev, NULL);
|
||||
|
||||
nomem:
|
||||
kfree(ones);
|
||||
|
@ -1496,10 +1478,7 @@ static int mmc_spi_probe(struct spi_device *spi)
|
|||
static int mmc_spi_remove(struct spi_device *spi)
|
||||
{
|
||||
struct mmc_host *mmc = dev_get_drvdata(&spi->dev);
|
||||
struct mmc_spi_host *host;
|
||||
|
||||
if (mmc) {
|
||||
host = mmc_priv(mmc);
|
||||
struct mmc_spi_host *host = mmc_priv(mmc);
|
||||
|
||||
/* prevent new mmc_detect_change() calls */
|
||||
if (host->pdata && host->pdata->exit)
|
||||
|
@ -1520,8 +1499,6 @@ static int mmc_spi_remove(struct spi_device *spi)
|
|||
spi->max_speed_hz = mmc->f_max;
|
||||
mmc_free_host(mmc);
|
||||
mmc_spi_put_pdata(spi);
|
||||
dev_set_drvdata(&spi->dev, NULL);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -1542,8 +1519,7 @@ static struct spi_driver mmc_spi_driver = {
|
|||
|
||||
module_spi_driver(mmc_spi_driver);
|
||||
|
||||
MODULE_AUTHOR("Mike Lavender, David Brownell, "
|
||||
"Hans-Peter Nilsson, Jan Nikitenko");
|
||||
MODULE_AUTHOR("Mike Lavender, David Brownell, Hans-Peter Nilsson, Jan Nikitenko");
|
||||
MODULE_DESCRIPTION("SPI SD/MMC host driver");
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_ALIAS("spi:mmc_spi");
|
||||
|
|
|
@ -43,21 +43,11 @@
|
|||
#include <asm/io.h>
|
||||
|
||||
#include "mmci.h"
|
||||
#include "mmci_qcom_dml.h"
|
||||
|
||||
#define DRIVER_NAME "mmci-pl18x"
|
||||
|
||||
#ifdef CONFIG_DMA_ENGINE
|
||||
void mmci_variant_init(struct mmci_host *host);
|
||||
#else
|
||||
static inline void mmci_variant_init(struct mmci_host *host) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMC_STM32_SDMMC
|
||||
void sdmmc_variant_init(struct mmci_host *host);
|
||||
#else
|
||||
static inline void sdmmc_variant_init(struct mmci_host *host) {}
|
||||
#endif
|
||||
static void mmci_variant_init(struct mmci_host *host);
|
||||
static void ux500v2_variant_init(struct mmci_host *host);
|
||||
|
||||
static unsigned int fmax = 515633;
|
||||
|
||||
|
@ -70,7 +60,6 @@ static struct variant_data variant_arm = {
|
|||
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
||||
.datalength_bits = 16,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.pwrreg_powerup = MCI_PWR_UP,
|
||||
.f_max = 100000000,
|
||||
.reversed_irq_handling = true,
|
||||
|
@ -90,7 +79,6 @@ static struct variant_data variant_arm_extended_fifo = {
|
|||
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
||||
.datalength_bits = 16,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.pwrreg_powerup = MCI_PWR_UP,
|
||||
.f_max = 100000000,
|
||||
.mmcimask1 = true,
|
||||
|
@ -110,7 +98,6 @@ static struct variant_data variant_arm_extended_fifo_hwfc = {
|
|||
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
||||
.datalength_bits = 16,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.pwrreg_powerup = MCI_PWR_UP,
|
||||
.f_max = 100000000,
|
||||
.mmcimask1 = true,
|
||||
|
@ -131,7 +118,6 @@ static struct variant_data variant_u300 = {
|
|||
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
||||
.datalength_bits = 16,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
||||
.st_sdio = true,
|
||||
.pwrreg_powerup = MCI_PWR_ON,
|
||||
|
@ -157,7 +143,6 @@ static struct variant_data variant_nomadik = {
|
|||
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
||||
.datalength_bits = 24,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
||||
.st_sdio = true,
|
||||
.st_clkdiv = true,
|
||||
|
@ -186,7 +171,6 @@ static struct variant_data variant_ux500 = {
|
|||
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
||||
.datalength_bits = 24,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
||||
.st_sdio = true,
|
||||
.st_clkdiv = true,
|
||||
|
@ -220,11 +204,9 @@ static struct variant_data variant_ux500v2 = {
|
|||
.datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE,
|
||||
.datalength_bits = 24,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
||||
.st_sdio = true,
|
||||
.st_clkdiv = true,
|
||||
.blksz_datactrl16 = true,
|
||||
.pwrreg_powerup = MCI_PWR_ON,
|
||||
.f_max = 100000000,
|
||||
.signal_direction = true,
|
||||
|
@@ -238,7 +220,7 @@ static struct variant_data variant_ux500v2 = {
	.irq_pio_mask		= MCI_IRQ_PIO_MASK,
	.start_err		= MCI_STARTBITERR,
	.opendrain		= MCI_OD,
	.init			= mmci_variant_init,
	.init			= ux500v2_variant_init,
};

static struct variant_data variant_stm32 = {

@ -255,7 +237,6 @@ static struct variant_data variant_stm32 = {
|
|||
.irq_pio_mask = MCI_IRQ_PIO_MASK,
|
||||
.datalength_bits = 24,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN,
|
||||
.st_sdio = true,
|
||||
.st_clkdiv = true,
|
||||
|
@ -299,10 +280,8 @@ static struct variant_data variant_qcom = {
|
|||
.cmdreg_srsp_crc = MCI_CPSM_RESPONSE,
|
||||
.cmdreg_srsp = MCI_CPSM_RESPONSE,
|
||||
.data_cmd_enable = MCI_CPSM_QCOM_DATCMD,
|
||||
.blksz_datactrl4 = true,
|
||||
.datalength_bits = 24,
|
||||
.datactrl_blocksz = 11,
|
||||
.datactrl_dpsm_enable = MCI_DPSM_ENABLE,
|
||||
.pwrreg_powerup = MCI_PWR_UP,
|
||||
.f_max = 208000000,
|
||||
.explicit_mclk_control = true,
|
||||
|
@@ -624,6 +603,16 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
}

static u32 mmci_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | mmci_dctrl_blksz(host);
}

static u32 ux500v2_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | (host->data->blksz << 16);
}

/*
 * All the DMA operation mode stuff goes inside this ifdef.
 * This assumes that you have a generic DMA device interface,
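/*
 * Editor's sketch (not part of the patch): the get_datactrl_cfg() helpers
 * above differ only in how the block size is encoded into the data control
 * register.  The standalone program below reproduces that arithmetic for a
 * 512-byte block, assuming MCI_DPSM_ENABLE is bit 0 as in mmci.h; the
 * printed values are illustrative only.
 */
#include <stdio.h>

#define MCI_DPSM_ENABLE (1u << 0)

static unsigned int log2_blksz(unsigned int blksz)
{
	unsigned int bits = 0;

	while ((1u << bits) < blksz)
		bits++;
	return bits;		/* equivalent to ffs(blksz) - 1 for powers of two */
}

int main(void)
{
	unsigned int blksz = 512;

	/* default mmci layout: log2(blksz) in bits 7:4 (see mmci_dctrl_blksz) */
	printf("mmci:    0x%08x\n", MCI_DPSM_ENABLE | (log2_blksz(blksz) << 4));
	/* ux500v2 layout: raw block size in bits 30:16 */
	printf("ux500v2: 0x%08x\n", MCI_DPSM_ENABLE | (blksz << 16));
	/* qcom layout: raw block size shifted by 4 (see qcom_get_dctrl_cfg below) */
	printf("qcom:    0x%08x\n", MCI_DPSM_ENABLE | (blksz << 4));
	return 0;
}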
@ -886,15 +875,11 @@ int mmci_dmae_prep_data(struct mmci_host *host,
|
|||
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl)
|
||||
{
|
||||
struct mmci_dmae_priv *dmae = host->dma_priv;
|
||||
struct mmc_data *data = host->data;
|
||||
|
||||
host->dma_in_progress = true;
|
||||
dmaengine_submit(dmae->desc_current);
|
||||
dma_async_issue_pending(dmae->cur);
|
||||
|
||||
if (host->variant->qcom_dml)
|
||||
dml_start_xfer(host, data);
|
||||
|
||||
*datactrl |= MCI_DPSM_DMAENABLE;
|
||||
|
||||
return 0;
|
||||
|
@ -952,6 +937,7 @@ void mmci_dmae_unprep_data(struct mmci_host *host,
|
|||
static struct mmci_host_ops mmci_variant_ops = {
|
||||
.prep_data = mmci_dmae_prep_data,
|
||||
.unprep_data = mmci_dmae_unprep_data,
|
||||
.get_datactrl_cfg = mmci_get_dctrl_cfg,
|
||||
.get_next_data = mmci_dmae_get_next_data,
|
||||
.dma_setup = mmci_dmae_setup,
|
||||
.dma_release = mmci_dmae_release,
|
||||
|
@@ -959,12 +945,22 @@ static struct mmci_host_ops mmci_variant_ops = {
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};
#else
static struct mmci_host_ops mmci_variant_ops = {
	.get_datactrl_cfg = mmci_get_dctrl_cfg,
};
#endif

void mmci_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
}
#endif

void ux500v2_variant_init(struct mmci_host *host)
{
	host->ops = &mmci_variant_ops;
	host->ops->get_datactrl_cfg = ux500v2_get_dctrl_cfg;
}

static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
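/*
 * Editor's sketch (not from the patch): the pattern used by
 * ux500v2_variant_init() above - point host->ops at the shared ops table,
 * then override a single callback - generalises to any variant-specific
 * hook.  The "demo" names below are made up for illustration; note that the
 * override mutates the shared table, just as the real ux500v2 init does.
 */
#include <stdio.h>

struct demo_host_ops {
	unsigned int (*get_datactrl_cfg)(void *host);
};

struct demo_host {
	struct demo_host_ops *ops;
};

static unsigned int demo_default_cfg(void *host) { return 0x91; }
static unsigned int demo_variant_cfg(void *host) { return 0x2000001; }

static struct demo_host_ops demo_ops = {
	.get_datactrl_cfg = demo_default_cfg,
};

/* variant init: reuse the common table, override one hook */
static void demo_variant_init(struct demo_host *host)
{
	host->ops = &demo_ops;
	host->ops->get_datactrl_cfg = demo_variant_cfg;
}

int main(void)
{
	struct demo_host host;

	demo_variant_init(&host);
	printf("cfg = 0x%x\n", host.ops->get_datactrl_cfg(&host));
	return 0;
}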
@@ -1000,7 +996,6 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
	unsigned int datactrl, timeout, irqmask;
	unsigned long long clks;
	void __iomem *base;
	int blksz_bits;

	dev_dbg(mmc_dev(host->mmc), "blksz %04x blks %04x flags %08x\n",
		data->blksz, data->blocks, data->flags);

@@ -1018,18 +1013,8 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
	writel(timeout, base + MMCIDATATIMER);
	writel(host->size, base + MMCIDATALENGTH);

	blksz_bits = ffs(data->blksz) - 1;
	BUG_ON(1 << blksz_bits != data->blksz);

	if (variant->blksz_datactrl16)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 16);
	else if (variant->blksz_datactrl4)
		datactrl = variant->datactrl_dpsm_enable | (data->blksz << 4);
	else
		datactrl = variant->datactrl_dpsm_enable | blksz_bits << 4;

	if (data->flags & MMC_DATA_READ)
		datactrl |= MCI_DPSM_DIRECTION;
	datactrl = host->ops->get_datactrl_cfg(host);
	datactrl |= host->data->flags & MMC_DATA_READ ? MCI_DPSM_DIRECTION : 0;

	if (host->mmc->card && mmc_card_sdio(host->mmc->card)) {
		u32 clk;
@ -1220,12 +1205,13 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
|||
unsigned int status)
|
||||
{
|
||||
void __iomem *base = host->base;
|
||||
bool sbc;
|
||||
bool sbc, busy_resp;
|
||||
|
||||
if (!cmd)
|
||||
return;
|
||||
|
||||
sbc = (cmd == host->mrq->sbc);
|
||||
busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
|
||||
|
||||
/*
|
||||
* We need to be one of these interrupts to be considered worth
|
||||
|
@ -1239,8 +1225,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
|||
/*
|
||||
* ST Micro variant: handle busy detection.
|
||||
*/
|
||||
if (host->variant->busy_detect) {
|
||||
bool busy_resp = !!(cmd->flags & MMC_RSP_BUSY);
|
||||
if (busy_resp && host->variant->busy_detect) {
|
||||
|
||||
/* We are busy with a command, return */
|
||||
if (host->busy_status &&
|
||||
|
@ -1253,7 +1238,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
|
|||
* that the special busy status bit is still set before
|
||||
* proceeding.
|
||||
*/
|
||||
if (!host->busy_status && busy_resp &&
|
||||
if (!host->busy_status &&
|
||||
!(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
|
||||
(readl(base + MMCISTATUS) & host->variant->busy_detect_flag)) {
|
||||
|
||||
|
@ -1550,9 +1535,10 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
|
|||
}
|
||||
|
||||
/*
|
||||
* Don't poll for busy completion in irq context.
|
||||
* Busy detection has been handled by mmci_cmd_irq() above.
|
||||
* Clear the status bit to prevent polling in IRQ context.
|
||||
*/
|
||||
if (host->variant->busy_detect && host->busy_status)
|
||||
if (host->variant->busy_detect_flag)
|
||||
status &= ~host->variant->busy_detect_flag;
|
||||
|
||||
ret = 1;
|
||||
|
|
|
@@ -131,6 +131,11 @@
/* Control register extensions in the Qualcomm versions */
#define MCI_DPSM_QCOM_DATA_PEND		BIT(17)
#define MCI_DPSM_QCOM_RX_DATA_PEND	BIT(20)
/* Control register extensions in STM32 versions */
#define MCI_DPSM_STM32_MODE_BLOCK	(0 << 2)
#define MCI_DPSM_STM32_MODE_SDIO	(1 << 2)
#define MCI_DPSM_STM32_MODE_STREAM	(2 << 2)
#define MCI_DPSM_STM32_MODE_BLOCK_STOP	(3 << 2)

#define MMCIDATACNT		0x030
#define MMCISTATUS		0x034
@ -275,12 +280,8 @@ struct mmci_host;
|
|||
* @st_clkdiv: true if using a ST-specific clock divider algorithm
|
||||
* @stm32_clkdiv: true if using a STM32-specific clock divider algorithm
|
||||
* @datactrl_mask_ddrmode: ddr mode mask in datactrl register.
|
||||
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
|
||||
* @blksz_datactrl4: true if Block size is at b4..b16 position in datactrl
|
||||
* register
|
||||
* @datactrl_mask_sdio: SDIO enable mask in datactrl register
|
||||
* @datactrl_blksz: block size in power of two
|
||||
* @datactrl_dpsm_enable: enable value for DPSM
|
||||
* @datactrl_first: true if data must be setup before send command
|
||||
* @datacnt_useless: true if you could not use datacnt register to read
|
||||
* remaining data
|
||||
|
@ -325,14 +326,11 @@ struct variant_data {
|
|||
unsigned int datactrl_mask_ddrmode;
|
||||
unsigned int datactrl_mask_sdio;
|
||||
unsigned int datactrl_blocksz;
|
||||
unsigned int datactrl_dpsm_enable;
|
||||
u8 datactrl_first:1;
|
||||
u8 datacnt_useless:1;
|
||||
u8 st_sdio:1;
|
||||
u8 st_clkdiv:1;
|
||||
u8 stm32_clkdiv:1;
|
||||
u8 blksz_datactrl16:1;
|
||||
u8 blksz_datactrl4:1;
|
||||
u32 pwrreg_powerup;
|
||||
u32 f_max;
|
||||
u8 signal_direction:1;
|
||||
|
@ -362,6 +360,7 @@ struct mmci_host_ops {
|
|||
bool next);
|
||||
void (*unprep_data)(struct mmci_host *host, struct mmc_data *data,
|
||||
int err);
|
||||
u32 (*get_datactrl_cfg)(struct mmci_host *host);
|
||||
void (*get_next_data)(struct mmci_host *host, struct mmc_data *data);
|
||||
int (*dma_setup)(struct mmci_host *host);
|
||||
void (*dma_release)(struct mmci_host *host);
|
||||
|
@@ -429,6 +428,12 @@ struct mmci_host {
void mmci_write_clkreg(struct mmci_host *host, u32 clk);
void mmci_write_pwrreg(struct mmci_host *host, u32 pwr);

static inline u32 mmci_dctrl_blksz(struct mmci_host *host)
{
	return (ffs(host->data->blksz) - 1) << 4;
}

#ifdef CONFIG_DMA_ENGINE
int mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data,
			bool next);
void mmci_dmae_unprep_data(struct mmci_host *host, struct mmc_data *data,
@ -439,3 +444,16 @@ void mmci_dmae_release(struct mmci_host *host);
|
|||
int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl);
|
||||
void mmci_dmae_finalize(struct mmci_host *host, struct mmc_data *data);
|
||||
void mmci_dmae_error(struct mmci_host *host);
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMC_QCOM_DML
|
||||
void qcom_variant_init(struct mmci_host *host);
|
||||
#else
|
||||
static inline void qcom_variant_init(struct mmci_host *host) {}
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_MMC_STM32_SDMMC
|
||||
void sdmmc_variant_init(struct mmci_host *host);
|
||||
#else
|
||||
static inline void sdmmc_variant_init(struct mmci_host *host) {}
|
||||
#endif
|
||||
|
|
|
@ -54,10 +54,15 @@
|
|||
|
||||
#define DML_OFFSET 0x800
|
||||
|
||||
void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
|
||||
static int qcom_dma_start(struct mmci_host *host, unsigned int *datactrl)
|
||||
{
|
||||
u32 config;
|
||||
void __iomem *base = host->base + DML_OFFSET;
|
||||
struct mmc_data *data = host->data;
|
||||
int ret = mmci_dmae_start(host, datactrl);
|
||||
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
if (data->flags & MMC_DATA_READ) {
|
||||
/* Read operation: configure DML for producer operation */
|
||||
|
@ -96,6 +101,7 @@ void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
|
|||
|
||||
/* make sure the dml is configured before dma is triggered */
|
||||
wmb();
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int of_get_dml_pipe_index(struct device_node *np, const char *name)
|
||||
|
@ -133,7 +139,6 @@ static int qcom_dma_setup(struct mmci_host *host)
|
|||
producer_id = of_get_dml_pipe_index(np, "rx");
|
||||
|
||||
if (producer_id < 0 || consumer_id < 0) {
|
||||
host->variant->qcom_dml = false;
|
||||
mmci_dmae_release(host);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@@ -183,13 +188,19 @@ static int qcom_dma_setup(struct mmci_host *host)
	return 0;
}

static u32 qcom_get_dctrl_cfg(struct mmci_host *host)
{
	return MCI_DPSM_ENABLE | (host->data->blksz << 4);
}

static struct mmci_host_ops qcom_variant_ops = {
	.prep_data = mmci_dmae_prep_data,
	.unprep_data = mmci_dmae_unprep_data,
	.get_datactrl_cfg = qcom_get_dctrl_cfg,
	.get_next_data = mmci_dmae_get_next_data,
	.dma_setup = qcom_dma_setup,
	.dma_release = mmci_dmae_release,
	.dma_start = mmci_dmae_start,
	.dma_start = qcom_dma_start,
	.dma_finalize = mmci_dmae_finalize,
	.dma_error = mmci_dmae_error,
};

@ -1,30 +0,0 @@
|
|||
/*
|
||||
*
|
||||
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
* only version 2 as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
*/
|
||||
#ifndef __MMC_QCOM_DML_H__
|
||||
#define __MMC_QCOM_DML_H__
|
||||
|
||||
#ifdef CONFIG_MMC_QCOM_DML
|
||||
void qcom_variant_init(struct mmci_host *host);
|
||||
void dml_start_xfer(struct mmci_host *host, struct mmc_data *data);
|
||||
#else
|
||||
static inline void qcom_variant_init(struct mmci_host *host)
|
||||
{
|
||||
}
|
||||
static inline void dml_start_xfer(struct mmci_host *host, struct mmc_data *data)
|
||||
{
|
||||
}
|
||||
#endif /* CONFIG_MMC_QCOM_DML */
|
||||
|
||||
#endif /* __MMC_QCOM_DML_H__ */
|
|
@@ -265,10 +265,28 @@ static void mmci_sdmmc_set_pwrreg(struct mmci_host *host, unsigned int pwr)
	}
}

static u32 sdmmc_get_dctrl_cfg(struct mmci_host *host)
{
	u32 datactrl;

	datactrl = mmci_dctrl_blksz(host);

	if (host->mmc->card && mmc_card_sdio(host->mmc->card) &&
	    host->data->blocks == 1)
		datactrl |= MCI_DPSM_STM32_MODE_SDIO;
	else if (host->data->stop && !host->mrq->sbc)
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK_STOP;
	else
		datactrl |= MCI_DPSM_STM32_MODE_BLOCK;

	return datactrl;
}

static struct mmci_host_ops sdmmc_variant_ops = {
	.validate_data = sdmmc_idma_validate_data,
	.prep_data = sdmmc_idma_prep_data,
	.unprep_data = sdmmc_idma_unprep_data,
	.get_datactrl_cfg = sdmmc_get_dctrl_cfg,
	.dma_setup = sdmmc_idma_setup,
	.dma_start = sdmmc_idma_start,
	.dma_finalize = sdmmc_idma_finalize,

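/*
 * Editor's sketch (not from the patch): how sdmmc_get_dctrl_cfg() above
 * picks the STM32 DPSM transfer mode for a few request shapes.  The values
 * come from the MCI_DPSM_STM32_MODE_* defines added to mmci.h; the request
 * shapes below are illustrative.
 */
#include <stdio.h>
#include <stdbool.h>

#define MODE_BLOCK	(0 << 2)
#define MODE_SDIO	(1 << 2)
#define MODE_BLOCK_STOP	(3 << 2)

static int stm32_mode(bool sdio_card, unsigned int blocks, bool has_stop, bool has_sbc)
{
	if (sdio_card && blocks == 1)
		return MODE_SDIO;	/* single-block SDIO transfer */
	if (has_stop && !has_sbc)
		return MODE_BLOCK_STOP;	/* open-ended transfer ended by a stop command */
	return MODE_BLOCK;		/* pre-defined block count (e.g. sbc/CMD23) */
}

int main(void)
{
	printf("SDIO, 1 block:         0x%x\n", stm32_mode(true, 1, false, false));
	printf("multi-block with stop: 0x%x\n", stm32_mode(false, 8, true, false));
	printf("multi-block with sbc:  0x%x\n", stm32_mode(false, 8, true, true));
	return 0;
}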
@ -300,6 +300,8 @@
|
|||
#define CMD_TIMEOUT (HZ/10 * 5) /* 100ms x5 */
|
||||
#define DAT_TIMEOUT (HZ * 5) /* 1000ms x5 */
|
||||
|
||||
#define DEFAULT_DEBOUNCE (8) /* 8 cycles CD debounce */
|
||||
|
||||
#define PAD_DELAY_MAX 32 /* PAD delay cells */
|
||||
/*--------------------------------------------------------------------------*/
|
||||
/* Descriptor Structure */
|
||||
|
@ -372,6 +374,7 @@ struct mtk_mmc_compatible {
|
|||
bool stop_clk_fix;
|
||||
bool enhance_rx;
|
||||
bool support_64g;
|
||||
bool use_internal_cd;
|
||||
};
|
||||
|
||||
struct msdc_tune_para {
|
||||
|
@ -430,6 +433,7 @@ struct msdc_host {
|
|||
bool hs400_cmd_resp_sel_rising;
|
||||
/* cmd response sample selection for HS400 */
|
||||
bool hs400_mode; /* current eMMC will run at hs400 mode */
|
||||
bool internal_cd; /* Use internal card-detect logic */
|
||||
struct msdc_save_para save_para; /* used when gate HCLK */
|
||||
struct msdc_tune_para def_tune_para; /* default tune setting */
|
||||
struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */
|
||||
|
@ -507,6 +511,28 @@ static const struct mtk_mmc_compatible mt7622_compat = {
|
|||
.support_64g = false,
|
||||
};
|
||||
|
||||
static const struct mtk_mmc_compatible mt8516_compat = {
|
||||
.clk_div_bits = 12,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE0,
|
||||
.async_fifo = true,
|
||||
.data_tune = true,
|
||||
.busy_check = true,
|
||||
.stop_clk_fix = true,
|
||||
};
|
||||
|
||||
static const struct mtk_mmc_compatible mt7620_compat = {
|
||||
.clk_div_bits = 8,
|
||||
.hs400_tune = false,
|
||||
.pad_tune_reg = MSDC_PAD_TUNE,
|
||||
.async_fifo = false,
|
||||
.data_tune = false,
|
||||
.busy_check = false,
|
||||
.stop_clk_fix = false,
|
||||
.enhance_rx = false,
|
||||
.use_internal_cd = true,
|
||||
};
|
||||
|
||||
static const struct of_device_id msdc_of_ids[] = {
|
||||
{ .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat},
|
||||
{ .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat},
|
||||
|
@ -514,6 +540,8 @@ static const struct of_device_id msdc_of_ids[] = {
|
|||
{ .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat},
|
||||
{ .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat},
|
||||
{ .compatible = "mediatek,mt7622-mmc", .data = &mt7622_compat},
|
||||
{ .compatible = "mediatek,mt8516-mmc", .data = &mt8516_compat},
|
||||
{ .compatible = "mediatek,mt7620-mmc", .data = &mt7620_compat},
|
||||
{}
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, msdc_of_ids);
|
||||
|
@ -1407,6 +1435,12 @@ static irqreturn_t msdc_irq(int irq, void *dev_id)
|
|||
sdio_signal_irq(host->mmc);
|
||||
}
|
||||
|
||||
if ((events & event_mask) & MSDC_INT_CDSC) {
|
||||
if (host->internal_cd)
|
||||
mmc_detect_change(host->mmc, msecs_to_jiffies(20));
|
||||
events &= ~MSDC_INT_CDSC;
|
||||
}
|
||||
|
||||
if (!(events & (event_mask & ~MSDC_INT_SDIOIRQ)))
|
||||
break;
|
||||
|
||||
|
@ -1440,14 +1474,24 @@ static void msdc_init_hw(struct msdc_host *host)
|
|||
/* Reset */
|
||||
msdc_reset_hw(host);
|
||||
|
||||
/* Disable card detection */
|
||||
sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
|
||||
|
||||
/* Disable and clear all interrupts */
|
||||
writel(0, host->base + MSDC_INTEN);
|
||||
val = readl(host->base + MSDC_INT);
|
||||
writel(val, host->base + MSDC_INT);
|
||||
|
||||
/* Configure card detection */
|
||||
if (host->internal_cd) {
|
||||
sdr_set_field(host->base + MSDC_PS, MSDC_PS_CDDEBOUNCE,
|
||||
DEFAULT_DEBOUNCE);
|
||||
sdr_set_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
|
||||
sdr_set_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
|
||||
sdr_set_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
|
||||
} else {
|
||||
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
|
||||
sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
|
||||
sdr_clr_bits(host->base + MSDC_INTEN, MSDC_INTEN_CDSC);
|
||||
}
|
||||
|
||||
if (host->top_base) {
|
||||
writel(0, host->top_base + EMMC_TOP_CONTROL);
|
||||
writel(0, host->top_base + EMMC_TOP_CMD);
|
||||
|
@ -1557,6 +1601,13 @@ static void msdc_init_hw(struct msdc_host *host)
|
|||
static void msdc_deinit_hw(struct msdc_host *host)
|
||||
{
|
||||
u32 val;
|
||||
|
||||
if (host->internal_cd) {
|
||||
/* Disabled card-detect */
|
||||
sdr_clr_bits(host->base + MSDC_PS, MSDC_PS_CDEN);
|
||||
sdr_clr_bits(host->base + SDC_CFG, SDC_CFG_INSWKUP);
|
||||
}
|
||||
|
||||
/* Disable and clear all interrupts */
|
||||
writel(0, host->base + MSDC_INTEN);
|
||||
|
||||
|
@@ -2055,13 +2106,31 @@ static void msdc_ack_sdio_irq(struct mmc_host *mmc)
	__msdc_enable_sdio_irq(mmc, 1);
}

static int msdc_get_cd(struct mmc_host *mmc)
{
	struct msdc_host *host = mmc_priv(mmc);
	int val;

	if (mmc->caps & MMC_CAP_NONREMOVABLE)
		return 1;

	if (!host->internal_cd)
		return mmc_gpio_get_cd(mmc);

	val = readl(host->base + MSDC_PS) & MSDC_PS_CDSTS;
	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH)
		return !!val;
	else
		return !val;
}

static const struct mmc_host_ops mt_msdc_ops = {
	.post_req = msdc_post_req,
	.pre_req = msdc_pre_req,
	.request = msdc_ops_request,
	.set_ios = msdc_ops_set_ios,
	.get_ro = mmc_gpio_get_ro,
	.get_cd = mmc_gpio_get_cd,
	.get_cd = msdc_get_cd,
	.enable_sdio_irq = msdc_enable_sdio_irq,
	.ack_sdio_irq = msdc_ack_sdio_irq,
	.start_signal_voltage_switch = msdc_ops_switch_volt,
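/*
 * Editor's sketch (not from the patch): when the mtk-sd host falls back to
 * the internal card-detect logic used by msdc_get_cd() above.  The decision
 * mirrors the probe-time check later in this patch (removable slot, no CD
 * GPIO, and a compatible that sets use_internal_cd, e.g. MT7620); the
 * helper name below is made up.
 */
#include <stdio.h>
#include <stdbool.h>

static bool use_internal_cd(bool nonremovable, bool has_cd_gpio, bool comp_supports_it)
{
	/* removable slot with no cd-gpios property on a SoC that allows it */
	return !nonremovable && !has_cd_gpio && comp_supports_it;
}

int main(void)
{
	printf("MT7620, removable, no CD GPIO: %d\n", use_internal_cd(false, false, true));
	printf("MT7620, removable, CD GPIO:    %d\n", use_internal_cd(false, true, true));
	printf("eMMC (non-removable):          %d\n", use_internal_cd(true, false, true));
	return 0;
}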
@ -2123,9 +2192,11 @@ static int msdc_drv_probe(struct platform_device *pdev)
|
|||
}
|
||||
|
||||
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
|
||||
if (res) {
|
||||
host->top_base = devm_ioremap_resource(&pdev->dev, res);
|
||||
if (IS_ERR(host->top_base))
|
||||
host->top_base = NULL;
|
||||
}
|
||||
|
||||
ret = mmc_regulator_get_supply(mmc);
|
||||
if (ret)
|
||||
|
@ -2191,6 +2262,16 @@ static int msdc_drv_probe(struct platform_device *pdev)
|
|||
else
|
||||
mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095);
|
||||
|
||||
if (!(mmc->caps & MMC_CAP_NONREMOVABLE) &&
|
||||
!mmc_can_gpio_cd(mmc) &&
|
||||
host->dev_comp->use_internal_cd) {
|
||||
/*
|
||||
* Is removable but no GPIO declared, so
|
||||
* use internal functionality.
|
||||
*/
|
||||
host->internal_cd = true;
|
||||
}
|
||||
|
||||
if (mmc->caps & MMC_CAP_SDIO_IRQ)
|
||||
mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
|
||||
|
||||
|
@ -2227,7 +2308,7 @@ static int msdc_drv_probe(struct platform_device *pdev)
|
|||
msdc_init_hw(host);
|
||||
|
||||
ret = devm_request_irq(&pdev->dev, host->irq, msdc_irq,
|
||||
IRQF_TRIGGER_LOW | IRQF_ONESHOT, pdev->name, host);
|
||||
IRQF_TRIGGER_NONE, pdev->name, host);
|
||||
if (ret)
|
||||
goto release;
|
||||
|
||||
|
|
|
@@ -648,7 +648,8 @@ static int mxs_mmc_probe(struct platform_device *pdev)
	/* set mmc core parameters */
	mmc->ops = &mxs_mmc_ops;
	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED |
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23;
		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL | MMC_CAP_CMD23 |
		    MMC_CAP_ERASE;

	host->broken_cd = of_property_read_bool(np, "broken-cd");

@@ -1,14 +1,10 @@
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenFirmware bindings for the MMC-over-SPI driver
 *
 * Copyright (c) MontaVista Software, Inc. 2008.
 *
 * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <linux/kernel.h>

@@ -2077,7 +2077,7 @@ static int omap_hsmmc_runtime_suspend(struct device *dev)
	unsigned long flags;
	int ret = 0;

	host = platform_get_drvdata(to_platform_device(dev));
	host = dev_get_drvdata(dev);
	omap_hsmmc_context_save(host);
	dev_dbg(dev, "disabled\n");

@@ -2118,7 +2118,7 @@ static int omap_hsmmc_runtime_resume(struct device *dev)
	struct omap_hsmmc_host *host;
	unsigned long flags;

	host = platform_get_drvdata(to_platform_device(dev));
	host = dev_get_drvdata(dev);
	omap_hsmmc_context_restore(host);
	dev_dbg(dev, "enabled\n");

@ -3,7 +3,7 @@
|
|||
* Renesas Mobile SDHI
|
||||
*
|
||||
* Copyright (C) 2017 Horms Solutions Ltd., Simon Horman
|
||||
* Copyright (C) 2017 Renesas Electronics Corporation
|
||||
* Copyright (C) 2017-19 Renesas Electronics Corporation
|
||||
*/
|
||||
|
||||
#ifndef RENESAS_SDHI_H
|
||||
|
|
|
@ -2,8 +2,8 @@
|
|||
/*
|
||||
* Renesas SDHI
|
||||
*
|
||||
* Copyright (C) 2015-17 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2015-19 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
|
||||
* Copyright (C) 2009 Magnus Damm
|
||||
*
|
||||
|
@ -779,14 +779,14 @@ int renesas_sdhi_probe(struct platform_device *pdev,
|
|||
if (ver < SDHI_VER_GEN2_SDR104 && mmc_data->max_blk_count > U16_MAX)
|
||||
mmc_data->max_blk_count = U16_MAX;
|
||||
|
||||
ret = tmio_mmc_host_probe(host);
|
||||
if (ret < 0)
|
||||
goto edisclk;
|
||||
|
||||
/* One Gen2 SDHI incarnation does NOT have a CBSY bit */
|
||||
if (ver == SDHI_VER_GEN2_SDR50)
|
||||
mmc_data->flags &= ~TMIO_MMC_HAVE_CBSY;
|
||||
|
||||
ret = tmio_mmc_host_probe(host);
|
||||
if (ret < 0)
|
||||
goto edisclk;
|
||||
|
||||
/* Enable tuning iff we have an SCC and a supported mode */
|
||||
if (of_data && of_data->scc_offset &&
|
||||
(host->mmc->caps & MMC_CAP_UHS_SDR104 ||
|
||||
|
|
|
@ -2,8 +2,9 @@
|
|||
/*
|
||||
* DMA support for Internal DMAC with SDHI SD/SDIO controller
|
||||
*
|
||||
* Copyright (C) 2016-17 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-19 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-17 Horms Solutions, Simon Horman
|
||||
* Copyright (C) 2018-19 Sang Engineering, Wolfram Sang
|
||||
*/
|
||||
|
||||
#include <linux/bitops.h>
|
||||
|
@@ -95,8 +96,8 @@ static const struct renesas_sdhi_of_data of_rza2_compatible = {
	.scc_offset	= 0 - 0x1000,
	.taps		= rcar_gen3_scc_taps,
	.taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
	/* DMAC can handle 0xffffffff blk count but only 1 segment */
	.max_blk_count	= 0xffffffff,
	/* DMAC can handle 32bit blk count but only 1 segment */
	.max_blk_count	= UINT_MAX / TMIO_MAX_BLK_SIZE,
	.max_segs	= 1,
};

@@ -110,8 +111,8 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = {
	.scc_offset	= 0x1000,
	.taps		= rcar_gen3_scc_taps,
	.taps_num	= ARRAY_SIZE(rcar_gen3_scc_taps),
	/* DMAC can handle 0xffffffff blk count but only 1 segment */
	.max_blk_count	= 0xffffffff,
	/* DMAC can handle 32bit blk count but only 1 segment */
	.max_blk_count	= UINT_MAX / TMIO_MAX_BLK_SIZE,
	.max_segs	= 1,
};

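/*
 * Editor's sketch (not from the patch): why the max_blk_count caps above
 * appear to change from 0xffffffff to UINT_MAX / TMIO_MAX_BLK_SIZE.  With
 * the old cap, the request byte count (blocks * block size) overflows a
 * 32-bit value; the new cap keeps the product within UINT_MAX.
 * TMIO_MAX_BLK_SIZE is assumed here to be 512, as in the tmio headers.
 */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define TMIO_MAX_BLK_SIZE 512	/* assumption: value from tmio_mmc.h */

int main(void)
{
	uint64_t old_cap = 0xffffffffULL;
	uint64_t new_cap = UINT_MAX / TMIO_MAX_BLK_SIZE;

	printf("old: %llu blocks * %u bytes = %llu (overflows 32 bits)\n",
	       (unsigned long long)old_cap, TMIO_MAX_BLK_SIZE,
	       (unsigned long long)(old_cap * TMIO_MAX_BLK_SIZE));
	printf("new: %llu blocks * %u bytes = %llu (fits in 32 bits)\n",
	       (unsigned long long)new_cap, TMIO_MAX_BLK_SIZE,
	       (unsigned long long)(new_cap * TMIO_MAX_BLK_SIZE));
	return 0;
}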
@ -2,8 +2,8 @@
|
|||
/*
|
||||
* DMA support use of SYS DMAC with SDHI SD/SDIO controller
|
||||
*
|
||||
* Copyright (C) 2016-17 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2016-19 Renesas Electronics Corporation
|
||||
* Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
|
||||
* Copyright (C) 2017 Horms Solutions, Simon Horman
|
||||
* Copyright (C) 2010-2011 Guennadi Liakhovetski
|
||||
*/
|
||||
|
@ -65,7 +65,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen2_compatible = {
|
|||
.scc_offset = 0x0300,
|
||||
.taps = rcar_gen2_scc_taps,
|
||||
.taps_num = ARRAY_SIZE(rcar_gen2_scc_taps),
|
||||
.max_blk_count = 0xffffffff,
|
||||
.max_blk_count = UINT_MAX / TMIO_MAX_BLK_SIZE,
|
||||
};
|
||||
|
||||
/* Definitions for sampling clocks */
|
||||
|
|
|
@ -14,6 +14,7 @@
|
|||
#include <linux/clk.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/pm_qos.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/mmc.h>
|
||||
#include <linux/mmc/sdio.h>
|
||||
|
@ -73,6 +74,7 @@
|
|||
#define ESDHC_STROBE_DLL_CTRL_ENABLE (1 << 0)
|
||||
#define ESDHC_STROBE_DLL_CTRL_RESET (1 << 1)
|
||||
#define ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT 3
|
||||
#define ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT (4 << 20)
|
||||
|
||||
#define ESDHC_STROBE_DLL_STATUS 0x74
|
||||
#define ESDHC_STROBE_DLL_STS_REF_LOCK (1 << 1)
|
||||
|
@ -156,6 +158,8 @@
|
|||
#define ESDHC_FLAG_HS400_ES BIT(11)
|
||||
/* The IP has Host Controller Interface for Command Queuing */
|
||||
#define ESDHC_FLAG_CQHCI BIT(12)
|
||||
/* need request pmqos during low power */
|
||||
#define ESDHC_FLAG_PMQOS BIT(13)
|
||||
|
||||
struct esdhc_soc_data {
|
||||
u32 flags;
|
||||
|
@ -204,6 +208,12 @@ static const struct esdhc_soc_data usdhc_imx7d_data = {
|
|||
| ESDHC_FLAG_HS400,
|
||||
};
|
||||
|
||||
static struct esdhc_soc_data usdhc_imx7ulp_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
| ESDHC_FLAG_PMQOS | ESDHC_FLAG_HS400,
|
||||
};
|
||||
|
||||
static struct esdhc_soc_data usdhc_imx8qxp_data = {
|
||||
.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING
|
||||
| ESDHC_FLAG_HAVE_CAP1 | ESDHC_FLAG_HS200
|
||||
|
@ -229,6 +239,7 @@ struct pltfm_imx_data {
|
|||
WAIT_FOR_INT, /* sent CMD12, waiting for response INT */
|
||||
} multiblock_status;
|
||||
u32 is_ddr;
|
||||
struct pm_qos_request pm_qos_req;
|
||||
};
|
||||
|
||||
static const struct platform_device_id imx_esdhc_devtype[] = {
|
||||
|
@ -257,6 +268,7 @@ static const struct of_device_id imx_esdhc_dt_ids[] = {
|
|||
{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, },
|
||||
{ .compatible = "fsl,imx6ull-usdhc", .data = &usdhc_imx6ull_data, },
|
||||
{ .compatible = "fsl,imx7d-usdhc", .data = &usdhc_imx7d_data, },
|
||||
{ .compatible = "fsl,imx7ulp-usdhc", .data = &usdhc_imx7ulp_data, },
|
||||
{ .compatible = "fsl,imx8qxp-usdhc", .data = &usdhc_imx8qxp_data, },
|
||||
{ /* sentinel */ }
|
||||
};
|
||||
|
@ -983,15 +995,19 @@ static void esdhc_set_strobe_dll(struct sdhci_host *host)
|
|||
/* force a reset on strobe dll */
|
||||
writel(ESDHC_STROBE_DLL_CTRL_RESET,
|
||||
host->ioaddr + ESDHC_STROBE_DLL_CTRL);
|
||||
/* clear the reset bit on strobe dll before any setting */
|
||||
writel(0, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
|
||||
|
||||
/*
|
||||
* enable strobe dll ctrl and adjust the delay target
|
||||
* for the uSDHC loopback read clock
|
||||
*/
|
||||
v = ESDHC_STROBE_DLL_CTRL_ENABLE |
|
||||
ESDHC_STROBE_DLL_CTRL_SLV_UPDATE_INT_DEFAULT |
|
||||
(7 << ESDHC_STROBE_DLL_CTRL_SLV_DLY_TARGET_SHIFT);
|
||||
writel(v, host->ioaddr + ESDHC_STROBE_DLL_CTRL);
|
||||
/* wait 1us to make sure strobe dll status register stable */
|
||||
udelay(1);
|
||||
/* wait 5us to make sure strobe dll status register stable */
|
||||
udelay(5);
|
||||
v = readl(host->ioaddr + ESDHC_STROBE_DLL_STATUS);
|
||||
if (!(v & ESDHC_STROBE_DLL_STS_REF_LOCK))
|
||||
dev_warn(mmc_dev(host->mmc),
|
||||
|
@ -1436,6 +1452,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *)
|
||||
pdev->id_entry->driver_data;
|
||||
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
|
||||
pm_qos_add_request(&imx_data->pm_qos_req,
|
||||
PM_QOS_CPU_DMA_LATENCY, 0);
|
||||
|
||||
imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
|
||||
if (IS_ERR(imx_data->clk_ipg)) {
|
||||
err = PTR_ERR(imx_data->clk_ipg);
|
||||
|
@ -1557,6 +1577,8 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
|
|||
disable_per_clk:
|
||||
clk_disable_unprepare(imx_data->clk_per);
|
||||
free_sdhci:
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
|
||||
pm_qos_remove_request(&imx_data->pm_qos_req);
|
||||
sdhci_pltfm_free(pdev);
|
||||
return err;
|
||||
}
|
||||
|
@ -1578,6 +1600,9 @@ static int sdhci_esdhc_imx_remove(struct platform_device *pdev)
|
|||
clk_disable_unprepare(imx_data->clk_ipg);
|
||||
clk_disable_unprepare(imx_data->clk_ahb);
|
||||
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
|
||||
pm_qos_remove_request(&imx_data->pm_qos_req);
|
||||
|
||||
sdhci_pltfm_free(pdev);
|
||||
|
||||
return 0;
|
||||
|
@ -1649,6 +1674,9 @@ static int sdhci_esdhc_runtime_suspend(struct device *dev)
|
|||
}
|
||||
clk_disable_unprepare(imx_data->clk_ahb);
|
||||
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
|
||||
pm_qos_remove_request(&imx_data->pm_qos_req);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
@ -1659,9 +1687,13 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
|
|||
struct pltfm_imx_data *imx_data = sdhci_pltfm_priv(pltfm_host);
|
||||
int err;
|
||||
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
|
||||
pm_qos_add_request(&imx_data->pm_qos_req,
|
||||
PM_QOS_CPU_DMA_LATENCY, 0);
|
||||
|
||||
err = clk_prepare_enable(imx_data->clk_ahb);
|
||||
if (err)
|
||||
return err;
|
||||
goto remove_pm_qos_request;
|
||||
|
||||
if (!sdhci_sdio_irq_enabled(host)) {
|
||||
err = clk_prepare_enable(imx_data->clk_per);
|
||||
|
@ -1690,6 +1722,9 @@ static int sdhci_esdhc_runtime_resume(struct device *dev)
|
|||
clk_disable_unprepare(imx_data->clk_per);
|
||||
disable_ahb_clk:
|
||||
clk_disable_unprepare(imx_data->clk_ahb);
|
||||
remove_pm_qos_request:
|
||||
if (imx_data->socdata->flags & ESDHC_FLAG_PMQOS)
|
||||
pm_qos_remove_request(&imx_data->pm_qos_req);
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
|
|
|
@@ -832,7 +832,10 @@ static int sdhci_arasan_probe(struct platform_device *pdev)
		host->mmc_host_ops.start_signal_voltage_switch =
					sdhci_arasan_voltage_switch;
		sdhci_arasan->has_cqe = true;
		host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD;
		host->mmc->caps2 |= MMC_CAP2_CQE;

		if (!of_property_read_bool(np, "disable-cqe-dcmd"))
			host->mmc->caps2 |= MMC_CAP2_CQE_DCMD;
	}

	ret = sdhci_arasan_add_host(sdhci_arasan);

@ -24,6 +24,7 @@
|
|||
#include <linux/ktime.h>
|
||||
#include <linux/dma-mapping.h>
|
||||
#include <linux/mmc/host.h>
|
||||
#include <linux/mmc/mmc.h>
|
||||
#include "sdhci-pltfm.h"
|
||||
#include "sdhci-esdhc.h"
|
||||
|
||||
|
@ -81,6 +82,7 @@ struct sdhci_esdhc {
|
|||
bool quirk_limited_clk_division;
|
||||
bool quirk_unreliable_pulse_detection;
|
||||
bool quirk_fixup_tuning;
|
||||
bool quirk_ignore_data_inhibit;
|
||||
unsigned int peripheral_clock;
|
||||
const struct esdhc_clk_fixup *clk_fixup;
|
||||
u32 div_ratio;
|
||||
|
@@ -147,6 +149,19 @@ static u32 esdhc_readl_fixup(struct sdhci_host *host,
		return ret;
	}

	/*
	 * Some controllers have unreliable Data Line Active
	 * bit for commands with busy signal. This affects
	 * Command Inhibit (data) bit. Just ignore it since
	 * MMC core driver has already polled card status
	 * with CMD13 after any command with busy signal.
	 */
	if ((spec_reg == SDHCI_PRESENT_STATE) &&
	    (esdhc->quirk_ignore_data_inhibit == true)) {
		ret = value & ~SDHCI_DATA_INHIBIT;
		return ret;
	}

	ret = value;
	return ret;
}
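/*
 * Editor's sketch (not from the patch): the effect of the
 * quirk_ignore_data_inhibit path above on a PRESENT_STATE read.  The bit
 * value below assumes SDHCI_DATA_INHIBIT is bit 1, as defined in sdhci.h;
 * the raw register value is illustrative.
 */
#include <stdio.h>
#include <stdint.h>

#define SDHCI_DATA_INHIBIT 0x00000002	/* assumption: value from sdhci.h */

int main(void)
{
	uint32_t present_state = 0x01f70003;	/* illustrative raw register value */

	printf("raw:   0x%08x\n", present_state);
	printf("fixed: 0x%08x\n", present_state & ~SDHCI_DATA_INHIBIT);
	return 0;
}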
@ -694,6 +709,9 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask)
|
|||
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
|
||||
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
|
||||
|
||||
if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc"))
|
||||
mdelay(5);
|
||||
|
||||
if (mask & SDHCI_RESET_ALL) {
|
||||
val = sdhci_readl(host, ESDHC_TBCTL);
|
||||
val &= ~ESDHC_TB_EN;
|
||||
|
@ -864,6 +882,25 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host,
|
|||
sdhci_set_uhs_signaling(host, timing);
|
||||
}
|
||||
|
||||
static u32 esdhc_irq(struct sdhci_host *host, u32 intmask)
|
||||
{
|
||||
u32 command;
|
||||
|
||||
if (of_find_compatible_node(NULL, NULL,
|
||||
"fsl,p2020-esdhc")) {
|
||||
command = SDHCI_GET_CMD(sdhci_readw(host,
|
||||
SDHCI_COMMAND));
|
||||
if (command == MMC_WRITE_MULTIPLE_BLOCK &&
|
||||
sdhci_readw(host, SDHCI_BLOCK_COUNT) &&
|
||||
intmask & SDHCI_INT_DATA_END) {
|
||||
intmask &= ~SDHCI_INT_DATA_END;
|
||||
sdhci_writel(host, SDHCI_INT_DATA_END,
|
||||
SDHCI_INT_STATUS);
|
||||
}
|
||||
}
|
||||
return intmask;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
static u32 esdhc_proctl;
|
||||
static int esdhc_of_suspend(struct device *dev)
|
||||
|
@ -911,6 +948,7 @@ static const struct sdhci_ops sdhci_esdhc_be_ops = {
|
|||
.set_bus_width = esdhc_pltfm_set_bus_width,
|
||||
.reset = esdhc_reset,
|
||||
.set_uhs_signaling = esdhc_set_uhs_signaling,
|
||||
.irq = esdhc_irq,
|
||||
};
|
||||
|
||||
static const struct sdhci_ops sdhci_esdhc_le_ops = {
|
||||
|
@ -928,6 +966,7 @@ static const struct sdhci_ops sdhci_esdhc_le_ops = {
|
|||
.set_bus_width = esdhc_pltfm_set_bus_width,
|
||||
.reset = esdhc_reset,
|
||||
.set_uhs_signaling = esdhc_set_uhs_signaling,
|
||||
.irq = esdhc_irq,
|
||||
};
|
||||
|
||||
static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = {
|
||||
|
@ -955,6 +994,7 @@ static struct soc_device_attribute soc_incorrect_hostver[] = {
|
|||
|
||||
static struct soc_device_attribute soc_fixup_sdhc_clkdivs[] = {
|
||||
{ .family = "QorIQ LX2160A", .revision = "1.0", },
|
||||
{ .family = "QorIQ LX2160A", .revision = "2.0", },
|
||||
{ },
|
||||
};
|
||||
|
||||
|
@ -1074,6 +1114,11 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
|
|||
if (esdhc->vendor_ver > VENDOR_V_22)
|
||||
host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ;
|
||||
|
||||
if (of_find_compatible_node(NULL, NULL, "fsl,p2020-esdhc")) {
|
||||
host->quirks2 |= SDHCI_QUIRK_RESET_AFTER_REQUEST;
|
||||
host->quirks2 |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
|
||||
}
|
||||
|
||||
if (of_device_is_compatible(np, "fsl,p5040-esdhc") ||
|
||||
of_device_is_compatible(np, "fsl,p5020-esdhc") ||
|
||||
of_device_is_compatible(np, "fsl,p4080-esdhc") ||
|
||||
|
@ -1084,12 +1129,14 @@ static int sdhci_esdhc_probe(struct platform_device *pdev)
|
|||
if (of_device_is_compatible(np, "fsl,ls1021a-esdhc"))
|
||||
host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL;
|
||||
|
||||
esdhc->quirk_ignore_data_inhibit = false;
|
||||
if (of_device_is_compatible(np, "fsl,p2020-esdhc")) {
|
||||
/*
|
||||
* Freescale messed up with P2020 as it has a non-standard
|
||||
* host control register
|
||||
*/
|
||||
host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL;
|
||||
esdhc->quirk_ignore_data_inhibit = true;
|
||||
}
|
||||
|
||||
/* call to generic mmc_of_parse to support additional capabilities */
|
||||
|
|
|
@@ -785,7 +785,7 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host,
	sdhci_omap_start_clock(omap_host);
}

void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
static void sdhci_omap_reset(struct sdhci_host *host, u8 mask)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host);

@ -31,6 +31,10 @@
|
|||
#include <linux/mmc/sdhci-pci-data.h>
|
||||
#include <linux/acpi.h>
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
#include <asm/iosf_mbi.h>
|
||||
#endif
|
||||
|
||||
#include "cqhci.h"
|
||||
|
||||
#include "sdhci.h"
|
||||
|
@ -451,6 +455,50 @@ static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = {
|
|||
.probe_slot = pch_hc_probe_slot,
|
||||
};
|
||||
|
||||
#ifdef CONFIG_X86
|
||||
|
||||
#define BYT_IOSF_SCCEP 0x63
|
||||
#define BYT_IOSF_OCP_NETCTRL0 0x1078
|
||||
#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
|
||||
|
||||
static void byt_ocp_setting(struct pci_dev *pdev)
|
||||
{
|
||||
u32 val = 0;
|
||||
|
||||
if (pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC &&
|
||||
pdev->device != PCI_DEVICE_ID_INTEL_BYT_SDIO &&
|
||||
pdev->device != PCI_DEVICE_ID_INTEL_BYT_SD &&
|
||||
pdev->device != PCI_DEVICE_ID_INTEL_BYT_EMMC2)
|
||||
return;
|
||||
|
||||
if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
|
||||
&val)) {
|
||||
dev_err(&pdev->dev, "%s read error\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
|
||||
return;
|
||||
|
||||
val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
|
||||
|
||||
if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
|
||||
val)) {
|
||||
dev_err(&pdev->dev, "%s write error\n", __func__);
|
||||
return;
|
||||
}
|
||||
|
||||
dev_dbg(&pdev->dev, "%s completed\n", __func__);
|
||||
}
|
||||
|
||||
#else
|
||||
|
||||
static inline void byt_ocp_setting(struct pci_dev *pdev)
|
||||
{
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
enum {
|
||||
INTEL_DSM_FNS = 0,
|
||||
INTEL_DSM_V18_SWITCH = 3,
|
||||
|
@ -715,6 +763,8 @@ static void byt_probe_slot(struct sdhci_pci_slot *slot)
|
|||
|
||||
byt_read_dsm(slot);
|
||||
|
||||
byt_ocp_setting(slot->chip->pdev);
|
||||
|
||||
ops->execute_tuning = intel_execute_tuning;
|
||||
ops->start_signal_voltage_switch = intel_start_signal_voltage_switch;
|
||||
|
||||
|
@ -938,7 +988,35 @@ static int byt_sd_probe_slot(struct sdhci_pci_slot *slot)
|
|||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
|
||||
static int byt_resume(struct sdhci_pci_chip *chip)
|
||||
{
|
||||
byt_ocp_setting(chip->pdev);
|
||||
|
||||
return sdhci_pci_resume_host(chip);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PM
|
||||
|
||||
static int byt_runtime_resume(struct sdhci_pci_chip *chip)
|
||||
{
|
||||
byt_ocp_setting(chip->pdev);
|
||||
|
||||
return sdhci_pci_runtime_resume_host(chip);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
||||
static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = {
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
.resume = byt_resume,
|
||||
#endif
|
||||
#ifdef CONFIG_PM
|
||||
.runtime_resume = byt_runtime_resume,
|
||||
#endif
|
||||
.allow_runtime_pm = true,
|
||||
.probe_slot = byt_emmc_probe_slot,
|
||||
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
|
||||
|
@ -972,6 +1050,12 @@ static const struct sdhci_pci_fixes sdhci_intel_glk_emmc = {
|
|||
};
|
||||
|
||||
static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
.resume = byt_resume,
|
||||
#endif
|
||||
#ifdef CONFIG_PM
|
||||
.runtime_resume = byt_runtime_resume,
|
||||
#endif
|
||||
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
|
||||
SDHCI_QUIRK_NO_LED,
|
||||
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
|
||||
|
@ -983,6 +1067,12 @@ static const struct sdhci_pci_fixes sdhci_ni_byt_sdio = {
|
|||
};
|
||||
|
||||
static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
.resume = byt_resume,
|
||||
#endif
|
||||
#ifdef CONFIG_PM
|
||||
.runtime_resume = byt_runtime_resume,
|
||||
#endif
|
||||
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
|
||||
SDHCI_QUIRK_NO_LED,
|
||||
.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON |
|
||||
|
@ -994,6 +1084,12 @@ static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = {
|
|||
};
|
||||
|
||||
static const struct sdhci_pci_fixes sdhci_intel_byt_sd = {
|
||||
#ifdef CONFIG_PM_SLEEP
|
||||
.resume = byt_resume,
|
||||
#endif
|
||||
#ifdef CONFIG_PM
|
||||
.runtime_resume = byt_runtime_resume,
|
||||
#endif
|
||||
.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC |
|
||||
SDHCI_QUIRK_NO_LED,
|
||||
.quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON |
|
||||
|
@@ -1576,6 +1672,8 @@ static const struct pci_device_id pci_ids[] = {
	SDHCI_PCI_DEVICE(INTEL, CNPH_SD,  intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, ICP_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, ICP_SD,   intel_byt_sd),
	SDHCI_PCI_DEVICE(INTEL, CML_EMMC, intel_glk_emmc),
	SDHCI_PCI_DEVICE(INTEL, CML_SD,   intel_byt_sd),
	SDHCI_PCI_DEVICE(O2, 8120, o2),
	SDHCI_PCI_DEVICE(O2, 8220, o2),
	SDHCI_PCI_DEVICE(O2, 8221, o2),

@@ -50,6 +50,8 @@
#define PCI_DEVICE_ID_INTEL_CNPH_SD	0xa375
#define PCI_DEVICE_ID_INTEL_ICP_EMMC	0x34c4
#define PCI_DEVICE_ID_INTEL_ICP_SD	0x34f8
#define PCI_DEVICE_ID_INTEL_CML_EMMC	0x02c4
#define PCI_DEVICE_ID_INTEL_CML_SD	0x02f5

#define PCI_DEVICE_ID_SYSKONNECT_8000	0x8000
#define PCI_DEVICE_ID_VIA_95D0		0x95d0

@ -66,6 +66,22 @@
|
|||
|
||||
#define SDHCI_VNDR_TUN_CTRL0_0 0x1c0
|
||||
#define SDHCI_VNDR_TUN_CTRL0_TUN_HW_TAP 0x20000
|
||||
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK 0x03fc0000
|
||||
#define SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT 18
|
||||
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK 0x00001fc0
|
||||
#define SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT 6
|
||||
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK 0x000e000
|
||||
#define SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT 13
|
||||
#define TRIES_128 2
|
||||
#define TRIES_256 4
|
||||
#define SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK 0x7
|
||||
|
||||
#define SDHCI_TEGRA_VNDR_TUN_CTRL1_0 0x1c4
|
||||
#define SDHCI_TEGRA_VNDR_TUN_STATUS0 0x1C8
|
||||
#define SDHCI_TEGRA_VNDR_TUN_STATUS1 0x1CC
|
||||
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK 0xFF
|
||||
#define SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT 0x8
|
||||
#define TUNING_WORD_BIT_SIZE 32
|
||||
|
||||
#define SDHCI_TEGRA_AUTO_CAL_CONFIG 0x1e4
|
||||
#define SDHCI_AUTO_CAL_START BIT(31)
|
||||
|
@ -90,6 +106,7 @@
|
|||
#define NVQUIRK_HAS_PADCALIB BIT(6)
|
||||
#define NVQUIRK_NEEDS_PAD_CONTROL BIT(7)
|
||||
#define NVQUIRK_DIS_CARD_CLK_CONFIG_TAP BIT(8)
|
||||
#define NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING BIT(9)
|
||||
|
||||
/* SDMMC CQE Base Address for Tegra Host Ver 4.1 and Higher */
|
||||
#define SDHCI_TEGRA_CQE_BASE_ADDR 0xF000
|
||||
|
@ -97,6 +114,8 @@
|
|||
struct sdhci_tegra_soc_data {
|
||||
const struct sdhci_pltfm_data *pdata;
|
||||
u32 nvquirks;
|
||||
u8 min_tap_delay;
|
||||
u8 max_tap_delay;
|
||||
};
|
||||
|
||||
/* Magic pull up and pull down pad calibration offsets */
|
||||
|
@ -136,6 +155,8 @@ struct sdhci_tegra {
|
|||
u32 default_trim;
|
||||
u32 dqs_trim;
|
||||
bool enable_hwcq;
|
||||
unsigned long curr_clk_rate;
|
||||
u8 tuned_tap_delay;
|
||||
};
|
||||
|
||||
static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg)
|
||||
|
@ -241,6 +262,7 @@ static void tegra210_sdhci_writew(struct sdhci_host *host, u16 val, int reg)
|
|||
|
||||
if (is_tuning_cmd) {
|
||||
udelay(1);
|
||||
sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
|
||||
tegra_sdhci_configure_card_clk(host, clk_enabled);
|
||||
}
|
||||
}
|
||||
|
@ -722,6 +744,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
|
|||
*/
|
||||
host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
|
||||
clk_set_rate(pltfm_host->clk, host_clk);
|
||||
tegra_host->curr_clk_rate = host_clk;
|
||||
if (tegra_host->ddr_signaling)
|
||||
host->max_clk = host_clk;
|
||||
else
|
||||
|
@ -770,6 +793,159 @@ static void tegra_sdhci_hs400_dll_cal(struct sdhci_host *host)
|
|||
"HS400 delay line calibration timed out\n");
|
||||
}
|
||||
|
||||
static void tegra_sdhci_tap_correction(struct sdhci_host *host, u8 thd_up,
|
||||
u8 thd_low, u8 fixed_tap)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
||||
u32 val, tun_status;
|
||||
u8 word, bit, edge1, tap, window;
|
||||
bool tap_result;
|
||||
bool start_fail = false;
|
||||
bool start_pass = false;
|
||||
bool end_pass = false;
|
||||
bool first_fail = false;
|
||||
bool first_pass = false;
|
||||
u8 start_pass_tap = 0;
|
||||
u8 end_pass_tap = 0;
|
||||
u8 first_fail_tap = 0;
|
||||
u8 first_pass_tap = 0;
|
||||
u8 total_tuning_words = host->tuning_loop_count / TUNING_WORD_BIT_SIZE;
|
||||
|
||||
/*
|
||||
* Read auto-tuned results and extract good valid passing window by
|
||||
* filtering out un-wanted bubble/partial/merged windows.
|
||||
*/
|
||||
for (word = 0; word < total_tuning_words; word++) {
|
||||
val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
|
||||
val &= ~SDHCI_VNDR_TUN_CTRL0_TUN_WORD_SEL_MASK;
|
||||
val |= word;
|
||||
sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
|
||||
tun_status = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS0);
|
||||
bit = 0;
|
||||
while (bit < TUNING_WORD_BIT_SIZE) {
|
||||
tap = word * TUNING_WORD_BIT_SIZE + bit;
|
||||
tap_result = tun_status & (1 << bit);
|
||||
if (!tap_result && !start_fail) {
|
||||
start_fail = true;
|
||||
if (!first_fail) {
|
||||
first_fail_tap = tap;
|
||||
first_fail = true;
|
||||
}
|
||||
|
||||
} else if (tap_result && start_fail && !start_pass) {
|
||||
start_pass_tap = tap;
|
||||
start_pass = true;
|
||||
if (!first_pass) {
|
||||
first_pass_tap = tap;
|
||||
first_pass = true;
|
||||
}
|
||||
|
||||
} else if (!tap_result && start_fail && start_pass &&
|
||||
!end_pass) {
|
||||
end_pass_tap = tap - 1;
|
||||
end_pass = true;
|
||||
} else if (tap_result && start_pass && start_fail &&
|
||||
end_pass) {
|
||||
window = end_pass_tap - start_pass_tap;
|
||||
/* discard merged window and bubble window */
|
||||
if (window >= thd_up || window < thd_low) {
|
||||
start_pass_tap = tap;
|
||||
end_pass = false;
|
||||
} else {
|
||||
/* set tap at middle of valid window */
|
||||
tap = start_pass_tap + window / 2;
|
||||
tegra_host->tuned_tap_delay = tap;
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
bit++;
|
||||
}
|
||||
}
|
||||
|
||||
if (!first_fail) {
|
||||
WARN_ON("no edge detected, continue with hw tuned delay.\n");
|
||||
} else if (first_pass) {
|
||||
/* set tap location at fixed tap relative to the first edge */
|
||||
edge1 = first_fail_tap + (first_pass_tap - first_fail_tap) / 2;
|
||||
if (edge1 - 1 > fixed_tap)
|
||||
tegra_host->tuned_tap_delay = edge1 - fixed_tap;
|
||||
else
|
||||
tegra_host->tuned_tap_delay = edge1 + fixed_tap;
|
||||
}
|
||||
}
|
||||
|
||||
static void tegra_sdhci_post_tuning(struct sdhci_host *host)
|
||||
{
|
||||
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
|
||||
struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
|
||||
const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
|
||||
u32 avg_tap_dly, val, min_tap_dly, max_tap_dly;
|
||||
u8 fixed_tap, start_tap, end_tap, window_width;
|
||||
u8 thdupper, thdlower;
|
||||
u8 num_iter;
|
||||
u32 clk_rate_mhz, period_ps, bestcase, worstcase;
|
||||
|
||||
/* retain HW tuned tap to use incase if no correction is needed */
|
||||
val = sdhci_readl(host, SDHCI_TEGRA_VENDOR_CLOCK_CTRL);
|
||||
tegra_host->tuned_tap_delay = (val & SDHCI_CLOCK_CTRL_TAP_MASK) >>
|
||||
SDHCI_CLOCK_CTRL_TAP_SHIFT;
|
||||
if (soc_data->min_tap_delay && soc_data->max_tap_delay) {
|
||||
min_tap_dly = soc_data->min_tap_delay;
|
||||
max_tap_dly = soc_data->max_tap_delay;
|
||||
clk_rate_mhz = tegra_host->curr_clk_rate / USEC_PER_SEC;
|
||||
period_ps = USEC_PER_SEC / clk_rate_mhz;
|
||||
bestcase = period_ps / min_tap_dly;
|
||||
worstcase = period_ps / max_tap_dly;
|
||||
/*
|
||||
* Upper and Lower bound thresholds used to detect merged and
|
||||
* bubble windows
|
||||
*/
|
||||
thdupper = (2 * worstcase + bestcase) / 2;
|
||||
thdlower = worstcase / 4;
|
||||
/*
|
||||
* fixed tap is used when HW tuning result contains single edge
|
||||
* and tap is set at fixed tap delay relative to the first edge
|
||||
*/
|
||||
avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
|
||||
fixed_tap = avg_tap_dly / 2;
|
||||
|
||||
val = sdhci_readl(host, SDHCI_TEGRA_VNDR_TUN_STATUS1);
|
||||
start_tap = val & SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
|
||||
end_tap = (val >> SDHCI_TEGRA_VNDR_TUN_STATUS1_END_TAP_SHIFT) &
|
||||
SDHCI_TEGRA_VNDR_TUN_STATUS1_TAP_MASK;
|
||||
window_width = end_tap - start_tap;
|
||||
num_iter = host->tuning_loop_count;
|
||||
/*
|
||||
* partial window includes edges of the tuning range.
|
||||
* merged window includes more taps so window width is higher
|
||||
* than upper threshold.
|
||||
*/
|
||||
if (start_tap == 0 || (end_tap == (num_iter - 1)) ||
|
||||
(end_tap == num_iter - 2) || window_width >= thdupper) {
|
||||
pr_debug("%s: Apply tuning correction\n",
|
||||
mmc_hostname(host->mmc));
|
||||
tegra_sdhci_tap_correction(host, thdupper, thdlower,
|
||||
fixed_tap);
|
||||
}
|
||||
}
|
||||
|
||||
tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
|
||||
}
|
||||
|
static int tegra_sdhci_execute_hw_tuning(struct mmc_host *mmc, u32 opcode)
{
	struct sdhci_host *host = mmc_priv(mmc);
	int err;

	err = sdhci_execute_tuning(mmc, opcode);
	if (!err && !host->tuning_err)
		tegra_sdhci_post_tuning(host);

	return err;
}

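/*
 * Editor's sketch (not from the patch): the window thresholds computed in
 * tegra_sdhci_post_tuning() above, worked through for an assumed 200 MHz
 * interface clock and the Tegra194 min/max tap delays (96/139 ps) listed
 * later in this patch.  Integer division matches the driver's arithmetic.
 */
#include <stdio.h>

#define USEC_PER_SEC 1000000UL

int main(void)
{
	unsigned long curr_clk_rate = 200000000UL;	/* assumed 200 MHz */
	unsigned int min_tap_dly = 96, max_tap_dly = 139;

	unsigned int clk_rate_mhz = curr_clk_rate / USEC_PER_SEC;
	unsigned int period_ps = USEC_PER_SEC / clk_rate_mhz;
	unsigned int bestcase = period_ps / min_tap_dly;
	unsigned int worstcase = period_ps / max_tap_dly;
	unsigned int thdupper = (2 * worstcase + bestcase) / 2;
	unsigned int thdlower = worstcase / 4;
	unsigned int avg_tap_dly = (period_ps * 2) / (min_tap_dly + max_tap_dly);
	unsigned int fixed_tap = avg_tap_dly / 2;

	printf("period %u ps, best %u taps, worst %u taps\n",
	       period_ps, bestcase, worstcase);
	printf("thdupper %u, thdlower %u, fixed_tap %u\n",
	       thdupper, thdlower, fixed_tap);
	return 0;
}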
static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
|
||||
unsigned timing)
|
||||
{
|
||||
|
@ -778,16 +954,22 @@ static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
|
|||
bool set_default_tap = false;
|
||||
bool set_dqs_trim = false;
|
||||
bool do_hs400_dll_cal = false;
|
||||
u8 iter = TRIES_256;
|
||||
u32 val;
|
||||
|
||||
tegra_host->ddr_signaling = false;
|
||||
switch (timing) {
|
||||
case MMC_TIMING_UHS_SDR50:
|
||||
break;
|
||||
case MMC_TIMING_UHS_SDR104:
|
||||
case MMC_TIMING_MMC_HS200:
|
||||
/* Don't set default tap on tunable modes. */
|
||||
iter = TRIES_128;
|
||||
break;
|
||||
case MMC_TIMING_MMC_HS400:
|
||||
set_dqs_trim = true;
|
||||
do_hs400_dll_cal = true;
|
||||
iter = TRIES_128;
|
||||
break;
|
||||
case MMC_TIMING_MMC_DDR52:
|
||||
case MMC_TIMING_UHS_DDR50:
|
||||
|
@ -799,11 +981,25 @@ static void tegra_sdhci_set_uhs_signaling(struct sdhci_host *host,
|
|||
break;
|
||||
}
|
||||
|
||||
val = sdhci_readl(host, SDHCI_VNDR_TUN_CTRL0_0);
|
||||
val &= ~(SDHCI_VNDR_TUN_CTRL0_TUN_ITER_MASK |
|
||||
SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_MASK |
|
||||
SDHCI_VNDR_TUN_CTRL0_MUL_M_MASK);
|
||||
val |= (iter << SDHCI_VNDR_TUN_CTRL0_TUN_ITER_SHIFT |
|
||||
0 << SDHCI_VNDR_TUN_CTRL0_START_TAP_VAL_SHIFT |
|
||||
1 << SDHCI_VNDR_TUN_CTRL0_MUL_M_SHIFT);
|
||||
sdhci_writel(host, val, SDHCI_VNDR_TUN_CTRL0_0);
|
||||
sdhci_writel(host, 0, SDHCI_TEGRA_VNDR_TUN_CTRL1_0);
|
||||
|
||||
host->tuning_loop_count = (iter == TRIES_128) ? 128 : 256;
|
||||
|
||||
sdhci_set_uhs_signaling(host, timing);
|
||||
|
||||
tegra_sdhci_pad_autocalib(host);
|
||||
|
||||
if (set_default_tap)
|
||||
if (tegra_host->tuned_tap_delay && !set_default_tap)
|
||||
tegra_sdhci_set_tap(host, tegra_host->tuned_tap_delay);
|
||||
else
|
||||
tegra_sdhci_set_tap(host, tegra_host->default_tap);
|
||||
|
||||
if (set_dqs_trim)
|
||||
|
@ -928,23 +1124,86 @@ static void tegra_sdhci_voltage_switch(struct sdhci_host *host)
|
|||
tegra_host->pad_calib_required = true;
|
||||
}
|
||||
|
||||
static void tegra_cqhci_writel(struct cqhci_host *cq_host, u32 val, int reg)
|
||||
{
|
||||
struct mmc_host *mmc = cq_host->mmc;
|
||||
u8 ctrl;
|
||||
ktime_t timeout;
|
||||
bool timed_out;
|
||||
|
||||
/*
|
||||
* During CQE resume/unhalt, CQHCI driver unhalts CQE prior to
|
||||
* cqhci_host_ops enable where SDHCI DMA and BLOCK_SIZE registers need
|
||||
* to be re-configured.
|
||||
* Tegra CQHCI/SDHCI prevents write access to block size register when
|
||||
* CQE is unhalted. So handling CQE resume sequence here to configure
|
||||
* SDHCI block registers prior to exiting CQE halt state.
|
||||
*/
|
||||
if (reg == CQHCI_CTL && !(val & CQHCI_HALT) &&
|
||||
cqhci_readl(cq_host, CQHCI_CTL) & CQHCI_HALT) {
|
||||
sdhci_cqe_enable(mmc);
|
||||
writel(val, cq_host->mmio + reg);
|
||||
timeout = ktime_add_us(ktime_get(), 50);
|
||||
while (1) {
|
||||
timed_out = ktime_compare(ktime_get(), timeout) > 0;
|
||||
ctrl = cqhci_readl(cq_host, CQHCI_CTL);
|
||||
if (!(ctrl & CQHCI_HALT) || timed_out)
|
||||
break;
|
||||
}
|
||||
/*
|
||||
* CQE usually resumes very quick, but incase if Tegra CQE
|
||||
* doesn't resume retry unhalt.
|
||||
*/
|
||||
if (timed_out)
|
||||
writel(val, cq_host->mmio + reg);
|
||||
} else {
|
||||
writel(val, cq_host->mmio + reg);
|
||||
}
|
||||
}
|
||||
|
||||
static void sdhci_tegra_update_dcmd_desc(struct mmc_host *mmc,
					 struct mmc_request *mrq, u64 *data)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(mmc_priv(mmc));
	struct sdhci_tegra *tegra_host = sdhci_pltfm_priv(pltfm_host);
	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;

	if (soc_data->nvquirks & NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING &&
	    mrq->cmd->flags & MMC_RSP_R1B)
		*data |= CQHCI_CMD_TIMING(1);
}

static void sdhci_tegra_cqe_enable(struct mmc_host *mmc)
{
	struct cqhci_host *cq_host = mmc->cqe_private;
	u32 cqcfg = 0;
	u32 val;

	/*
	 * Tegra SDMMC Controller design prevents write access to BLOCK_COUNT
	 * registers when CQE is enabled.
	 * Tegra CQHCI/SDMMC design prevents write access to sdhci block size
	 * register when CQE is enabled and unhalted.
	 * CQHCI driver enables CQE prior to activation, so disable CQE before
	 * programming block size in sdhci controller and enable it back.
	 */
	cqcfg = cqhci_readl(cq_host, CQHCI_CFG);
	if (cqcfg & CQHCI_ENABLE)
		cqhci_writel(cq_host, (cqcfg & ~CQHCI_ENABLE), CQHCI_CFG);

	if (!cq_host->activated) {
		val = cqhci_readl(cq_host, CQHCI_CFG);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, (val & ~CQHCI_ENABLE),
				     CQHCI_CFG);
		sdhci_cqe_enable(mmc);
		if (val & CQHCI_ENABLE)
			cqhci_writel(cq_host, val, CQHCI_CFG);
	}

	if (cqcfg & CQHCI_ENABLE)
		cqhci_writel(cq_host, cqcfg, CQHCI_CFG);
	/*
	 * CMD CRC errors are seen sometimes with some eMMC devices when status
	 * command is sent during transfer of last data block which is the
	 * default case as send status command block counter (CBC) is 1.
	 * Recommended fix to set CBC to 0 allowing send status command only
	 * when data lines are idle.
	 */
	val = cqhci_readl(cq_host, CQHCI_SSC1);
	val &= ~CQHCI_SSC1_CBC_MASK;
	cqhci_writel(cq_host, val, CQHCI_SSC1);
}

static void sdhci_tegra_dumpregs(struct mmc_host *mmc)

@@ -966,9 +1225,11 @@ static u32 sdhci_tegra_cqhci_irq(struct sdhci_host *host, u32 intmask)
}

static const struct cqhci_host_ops sdhci_tegra_cqhci_ops = {
	.write_l = tegra_cqhci_writel,
	.enable = sdhci_tegra_cqe_enable,
	.disable = sdhci_cqe_disable,
	.dumpregs = sdhci_tegra_dumpregs,
	.update_dcmd_desc = sdhci_tegra_update_dcmd_desc,
};

static const struct sdhci_ops tegra_sdhci_ops = {

@@ -1109,6 +1370,8 @@ static const struct sdhci_tegra_soc_data soc_data_tegra210 = {
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104,
	.min_tap_delay = 106,
	.max_tap_delay = 185,
};

static const struct sdhci_ops tegra186_sdhci_ops = {

@@ -1143,15 +1406,30 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = {
};

static const struct sdhci_tegra_soc_data soc_data_tegra186 = {
	.pdata = &sdhci_tegra186_pdata,
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104 |
		    NVQUIRK_CQHCI_DCMD_R1B_CMD_TIMING,
	.min_tap_delay = 84,
	.max_tap_delay = 136,
};

static const struct sdhci_tegra_soc_data soc_data_tegra194 = {
	.pdata = &sdhci_tegra186_pdata,
	.nvquirks = NVQUIRK_NEEDS_PAD_CONTROL |
		    NVQUIRK_HAS_PADCALIB |
		    NVQUIRK_DIS_CARD_CLK_CONFIG_TAP |
		    NVQUIRK_ENABLE_SDR50 |
		    NVQUIRK_ENABLE_SDR104,
	.min_tap_delay = 96,
	.max_tap_delay = 139,
};

static const struct of_device_id sdhci_tegra_dt_match[] = {
	{ .compatible = "nvidia,tegra194-sdhci", .data = &soc_data_tegra194 },
	{ .compatible = "nvidia,tegra186-sdhci", .data = &soc_data_tegra186 },
	{ .compatible = "nvidia,tegra210-sdhci", .data = &soc_data_tegra210 },
	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra124 },

@@ -1250,6 +1528,10 @@ static int sdhci_tegra_probe(struct platform_device *pdev)
		host->mmc_host_ops.hs400_enhanced_strobe =
			tegra_sdhci_hs400_enhanced_strobe;

	if (!host->ops->platform_execute_tuning)
		host->mmc_host_ops.execute_tuning =
			tegra_sdhci_execute_hw_tuning;

	rc = mmc_of_parse(host->mmc);
	if (rc)
		goto err_parse_dt;

@@ -1329,11 +1611,67 @@ static int sdhci_tegra_remove(struct platform_device *pdev)
	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int __maybe_unused sdhci_tegra_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_suspend(host->mmc);
		if (ret)
			return ret;
	}

	ret = sdhci_suspend_host(host);
	if (ret) {
		cqhci_resume(host->mmc);
		return ret;
	}

	clk_disable_unprepare(pltfm_host->clk);
	return 0;
}

static int __maybe_unused sdhci_tegra_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	int ret;

	ret = clk_prepare_enable(pltfm_host->clk);
	if (ret)
		return ret;

	ret = sdhci_resume_host(host);
	if (ret)
		goto disable_clk;

	if (host->mmc->caps2 & MMC_CAP2_CQE) {
		ret = cqhci_resume(host->mmc);
		if (ret)
			goto suspend_host;
	}

	return 0;

suspend_host:
	sdhci_suspend_host(host);
disable_clk:
	clk_disable_unprepare(pltfm_host->clk);
	return ret;
}
#endif

static SIMPLE_DEV_PM_OPS(sdhci_tegra_dev_pm_ops, sdhci_tegra_suspend,
			 sdhci_tegra_resume);

static struct platform_driver sdhci_tegra_driver = {
	.driver = {
		.name = "sdhci-tegra",
		.of_match_table = sdhci_tegra_dt_match,
		.pm = &sdhci_pltfm_pmops,
		.pm = &sdhci_tegra_dev_pm_ops,
	},
	.probe = sdhci_tegra_probe,
	.remove = sdhci_tegra_remove,

@@ -446,6 +446,28 @@ static inline void sdhci_led_deactivate(struct sdhci_host *host)

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 *                           Core functions                                  *

@@ -1221,6 +1243,18 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);

@@ -1237,24 +1271,17 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
	WARN_ON(i >= SDHCI_MAX_MRQS);

	tasklet_schedule(&host->finish_tasklet);
	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void sdhci_finish_data(struct sdhci_host *host)

@@ -1305,34 +1332,17 @@ static void sdhci_finish_data(struct sdhci_host *host)
		 * responsibility to send the stop command if required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			sdhci_finish_mrq(host, data->mrq);
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			sdhci_send_command(host, data->stop);
		}
	} else {
		sdhci_finish_mrq(host, data->mrq);
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;

@@ -1492,7 +1502,7 @@ static void sdhci_finish_command(struct sdhci_host *host)
			sdhci_finish_data(host);

		if (!cmd->data)
			sdhci_finish_mrq(host, cmd->mrq);
			__sdhci_finish_mrq(host, cmd->mrq);
	}
}

@@ -2364,9 +2374,9 @@ static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
	/*
	 * Issue opcode repeatedly till Execute Tuning is set to 0 or the number
	 * of loops reaches 40 times.
	 * of loops reaches tuning loop count.
	 */
	for (i = 0; i < MAX_TUNING_LOOP; i++) {
	for (i = 0; i < host->tuning_loop_count; i++) {
		u16 ctrl;

		sdhci_send_tuning(host, opcode);

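With the bound now read from host->tuning_loop_count (defaulted to MAX_TUNING_LOOP in sdhci_alloc_host(), as seen further down in this diff), a platform driver can in principle raise the limit from its probe path. A purely hypothetical example, with an illustrative value not taken from any driver in this series:

	/* hypothetical override in a platform driver's probe path */
	host->tuning_loop_count = 128;	/* allow more tuning iterations than the default */
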
@@ -2523,11 +2533,6 @@ static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
{
	if (host->data_cmd) {

@@ -2589,7 +2594,7 @@ static const struct mmc_host_ops sdhci_ops = {

/*****************************************************************************\
 *                                                                           *
 *                                Tasklets                                   *
 *                              Request done                                 *
 *                                                                           *
\*****************************************************************************/

@@ -2612,8 +2617,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
		return true;
	}

	sdhci_del_timer(host, mrq);

	/*
	 * Always unmap the data buffers if they were mapped by
	 * sdhci_prepare_data() whenever we finish with a request.

@@ -2695,9 +2698,6 @@ static bool sdhci_request_done(struct sdhci_host *host)
		host->pending_reset = false;
	}

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);

	host->mrqs_done[i] = NULL;

	spin_unlock_irqrestore(&host->lock, flags);

@@ -2707,9 +2707,10 @@ static bool sdhci_request_done(struct sdhci_host *host)
	return false;
}

static void sdhci_tasklet_finish(unsigned long param)
static void sdhci_complete_work(struct work_struct *work)
{
	struct sdhci_host *host = (struct sdhci_host *)param;
	struct sdhci_host *host = container_of(work, struct sdhci_host,
					       complete_work);

	while (!sdhci_request_done(host))
		;

@@ -2754,6 +2755,7 @@ static void sdhci_timeout_data_timer(struct timer_list *t)
	if (host->data) {
		host->data->error = -ETIMEDOUT;
		sdhci_finish_data(host);
		queue_work(host->complete_wq, &host->complete_work);
	} else if (host->data_cmd) {
		host->data_cmd->error = -ETIMEDOUT;
		sdhci_finish_mrq(host, host->data_cmd->mrq);

@@ -2819,7 +2821,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
			return;
		}

		sdhci_finish_mrq(host, host->cmd->mrq);
		__sdhci_finish_mrq(host, host->cmd->mrq);
		return;
	}

@@ -2833,7 +2835,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
		if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
			mrq->sbc->error = err;
			sdhci_finish_mrq(host, mrq);
			__sdhci_finish_mrq(host, mrq);
			return;
		}
	}

@@ -2897,7 +2899,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
		if (intmask & SDHCI_INT_DATA_TIMEOUT) {
			host->data_cmd = NULL;
			data_cmd->error = -ETIMEDOUT;
			sdhci_finish_mrq(host, data_cmd->mrq);
			__sdhci_finish_mrq(host, data_cmd->mrq);
			return;
		}
		if (intmask & SDHCI_INT_DATA_END) {

@@ -2910,7 +2912,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
			if (host->cmd == data_cmd)
				return;

			sdhci_finish_mrq(host, data_cmd->mrq);
			__sdhci_finish_mrq(host, data_cmd->mrq);
			return;
		}
	}

@@ -2993,12 +2995,24 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
	}
}

static inline bool sdhci_defer_done(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	struct mmc_data *data = mrq->data;

	return host->pending_reset ||
	       ((host->flags & SDHCI_REQ_USE_DMA) && data &&
		data->host_cookie == COOKIE_MAPPED);
}

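sdhci_defer_done() lets the hard IRQ decide per request whether completion can happen immediately or must be pushed to the threaded handler (because DMA buffers are still mapped or a reset is pending). A minimal, generic sketch of that hardirq/threaded-irq split, illustrative only, with hypothetical my_* names rather than sdhci code:

	#include <linux/interrupt.h>

	struct my_dev {
		bool needs_deferred_work;	/* e.g. DMA buffers still mapped */
	};

	static irqreturn_t my_hardirq(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		if (!dev->needs_deferred_work)
			return IRQ_HANDLED;	/* fast path: finished in hard IRQ */

		return IRQ_WAKE_THREAD;		/* slow path: wake my_thread_fn() */
	}

	static irqreturn_t my_thread_fn(int irq, void *dev_id)
	{
		struct my_dev *dev = dev_id;

		dev->needs_deferred_work = false;	/* sleepable cleanup goes here */
		return IRQ_HANDLED;
	}

	static int my_request_irq(struct my_dev *dev, int irq)
	{
		return request_threaded_irq(irq, my_hardirq, my_thread_fn,
					    IRQF_SHARED, "my-dev", dev);
	}

This mirrors how sdhci_irq returns IRQ_WAKE_THREAD so that sdhci_thread_irq can finish the deferred requests outside atomic context.
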
static irqreturn_t sdhci_irq(int irq, void *dev_id)
{
	struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
	irqreturn_t result = IRQ_NONE;
	struct sdhci_host *host = dev_id;
	u32 intmask, mask, unexpected = 0;
	int max_loops = 16;
	int i;

	spin_lock(&host->lock);

@@ -3092,9 +3106,30 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
	} while (intmask && --max_loops);

	/* Determine if mrqs can be completed immediately */
	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		struct mmc_request *mrq = host->mrqs_done[i];

		if (!mrq)
			continue;

		if (sdhci_defer_done(host, mrq)) {
			result = IRQ_WAKE_THREAD;
		} else {
			mrqs_done[i] = mrq;
			host->mrqs_done[i] = NULL;
		}
	}
out:
	spin_unlock(&host->lock);

	/* Process mrqs ready for immediate completion */
	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (mrqs_done[i])
			mmc_request_done(host->mmc, mrqs_done[i]);
	}

	if (unexpected) {
		pr_err("%s: Unexpected interrupt 0x%08x.\n",
		       mmc_hostname(host->mmc), unexpected);

@@ -3110,6 +3145,9 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
	unsigned long flags;
	u32 isr;

	while (!sdhci_request_done(host))
		;

	spin_lock_irqsave(&host->lock, flags);
	isr = host->thread_isr;
	host->thread_isr = 0;

@@ -3131,7 +3169,7 @@ static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
		spin_unlock_irqrestore(&host->lock, flags);
	}

	return isr ? IRQ_HANDLED : IRQ_NONE;
	return IRQ_HANDLED;
}

/*****************************************************************************\

@@ -3483,6 +3521,7 @@ struct sdhci_host *sdhci_alloc_host(struct device *dev,
	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;

	host->tuning_delay = -1;
	host->tuning_loop_count = MAX_TUNING_LOOP;

	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;

@@ -4213,14 +4252,15 @@ EXPORT_SYMBOL_GPL(sdhci_cleanup_host);

int __sdhci_add_host(struct sdhci_host *host)
{
	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
	struct mmc_host *mmc = host->mmc;
	int ret;

	/*
	 * Init tasklets.
	 */
	tasklet_init(&host->finish_tasklet,
		sdhci_tasklet_finish, (unsigned long)host);
	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
	if (!host->complete_wq)
		return -ENOMEM;

	INIT_WORK(&host->complete_work, sdhci_complete_work);

	timer_setup(&host->timer, sdhci_timeout_timer, 0);
	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);

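The hunks above and below replace the old finish tasklet with a dedicated workqueue, but the allocate/init/queue/destroy steps end up spread over several hunks. Pulled together as one generic sketch (my_* names are illustrative assumptions, not sdhci symbols), the pattern is roughly:

	#include <linux/kernel.h>
	#include <linux/workqueue.h>

	struct my_host {
		struct workqueue_struct *complete_wq;
		struct work_struct complete_work;
	};

	static void my_complete_work(struct work_struct *work)
	{
		struct my_host *host = container_of(work, struct my_host, complete_work);

		/* finish requests here; unlike a tasklet, this context may sleep */
	}

	static int my_host_setup(struct my_host *host)
	{
		host->complete_wq = alloc_workqueue("my-host",
						    WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
		if (!host->complete_wq)
			return -ENOMEM;

		INIT_WORK(&host->complete_work, my_complete_work);
		return 0;
	}

	/* completion path, e.g. from the IRQ handler:
	 *	queue_work(host->complete_wq, &host->complete_work);
	 */

	static void my_host_teardown(struct my_host *host)
	{
		destroy_workqueue(host->complete_wq);	/* drains pending work first */
	}

WQ_MEM_RECLAIM is what guarantees forward progress when the queue sits on the block I/O path during memory reclaim, which is presumably why sdhci requests it here.
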
@@ -4234,7 +4274,7 @@ int __sdhci_add_host(struct sdhci_host *host)
	if (ret) {
		pr_err("%s: Failed to request IRQ %d: %d\n",
		       mmc_hostname(mmc), host->irq, ret);
		goto untasklet;
		goto unwq;
	}

	ret = sdhci_led_register(host);

@@ -4265,8 +4305,8 @@ int __sdhci_add_host(struct sdhci_host *host)
	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
	free_irq(host->irq, host);
untasklet:
	tasklet_kill(&host->finish_tasklet);
unwq:
	destroy_workqueue(host->complete_wq);

	return ret;
}

@@ -4328,7 +4368,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)
	del_timer_sync(&host->timer);
	del_timer_sync(&host->data_timer);

	tasklet_kill(&host->finish_tasklet);
	destroy_workqueue(host->complete_wq);

	if (!IS_ERR(mmc->supply.vqmmc))
		regulator_disable(mmc->supply.vqmmc);

@@ -560,7 +560,8 @@ struct sdhci_host {

	unsigned int desc_sz;	/* ADMA descriptor size */

	struct tasklet_struct finish_tasklet;	/* Tasklet structures */
	struct workqueue_struct *complete_wq;	/* Request completion wq */
	struct work_struct complete_work;	/* Request completion work */

	struct timer_list timer;	/* Timer for timeouts */
	struct timer_list data_timer;	/* Timer for data timeouts */

@@ -596,6 +597,7 @@ struct sdhci_host {
#define SDHCI_TUNING_MODE_3	2
	/* Delay (ms) between tuning commands */
	int tuning_delay;
	int tuning_loop_count;

	/* Host SDMA buffer boundary. */
	u32 sdma_boundary;

@@ -158,6 +158,27 @@ static void sdhci_am654_set_power(struct sdhci_host *host, unsigned char mode,
	sdhci_set_power_noreg(host, mode, vdd);
}

static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg)
{
	unsigned char timing = host->mmc->ios.timing;

	if (reg == SDHCI_HOST_CONTROL) {
		switch (timing) {
		/*
		 * According to the data manual, HISPD bit
		 * should not be set in these speed modes.
		 */
		case MMC_TIMING_SD_HS:
		case MMC_TIMING_MMC_HS:
		case MMC_TIMING_UHS_SDR12:
		case MMC_TIMING_UHS_SDR25:
			val &= ~SDHCI_CTRL_HISPD;
		}
	}

	writeb(val, host->ioaddr + reg);
}

static struct sdhci_ops sdhci_am654_ops = {
	.get_max_clock = sdhci_pltfm_clk_get_max_clock,
	.get_timeout_clock = sdhci_pltfm_clk_get_max_clock,

@@ -165,6 +186,7 @@ static struct sdhci_ops sdhci_am654_ops = {
	.set_bus_width = sdhci_set_bus_width,
	.set_power = sdhci_am654_set_power,
	.set_clock = sdhci_am654_set_clock,
	.write_b = sdhci_am654_write_b,
	.reset = sdhci_reset,
};

@@ -4,8 +4,8 @@
 *
 * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3
 *
 * Copyright (C) 2015-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-19 Renesas Electronics Corporation
 * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
 * Copyright (C) 2016-17 Horms Solutions, Simon Horman
 * Copyright (C) 2007 Ian Molton
 * Copyright (C) 2004 Ian Molton

@@ -105,6 +105,8 @@
		 TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)
#define TMIO_MASK_IRQ (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD)

#define TMIO_MAX_BLK_SIZE 512

struct tmio_mmc_data;
struct tmio_mmc_host;

@@ -4,8 +4,8 @@
 *
 * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs
 *
 * Copyright (C) 2015-17 Renesas Electronics Corporation
 * Copyright (C) 2016-17 Sang Engineering, Wolfram Sang
 * Copyright (C) 2015-19 Renesas Electronics Corporation
 * Copyright (C) 2016-19 Sang Engineering, Wolfram Sang
 * Copyright (C) 2017 Horms Solutions, Simon Horman
 * Copyright (C) 2011 Guennadi Liakhovetski
 * Copyright (C) 2007 Ian Molton

@@ -1186,7 +1186,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host)
	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities;
	mmc->caps2 |= pdata->capabilities2;
	mmc->max_segs = pdata->max_segs ? : 32;
	mmc->max_blk_size = 512;
	mmc->max_blk_size = TMIO_MAX_BLK_SIZE;
	mmc->max_blk_count = pdata->max_blk_count ? :
		(PAGE_SIZE / mmc->max_blk_size) * mmc->max_segs;
	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;

@@ -1339,7 +1339,7 @@ static int usdhi6_stop_cmd(struct usdhi6_host *host)
		host->wait = USDHI6_WAIT_FOR_STOP;
		return 0;
	}
	/* Unsupported STOP command */
	/* fall through - Unsupported STOP command. */
	default:
		dev_err(mmc_dev(host->mmc),
			"unsupported stop CMD%d for CMD%d\n",

@@ -1687,7 +1687,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
	switch (host->wait) {
	default:
		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait);
		/* mrq can be NULL in this actually impossible case */
		/* fall through - mrq can be NULL, but is impossible. */
	case USDHI6_WAIT_FOR_CMD:
		usdhi6_error_code(host);
		if (mrq)

@@ -1709,10 +1709,7 @@ static void usdhi6_timeout_work(struct work_struct *work)
			host->offset, data->blocks, data->blksz, data->sg_len,
			sg_dma_len(sg), sg->offset);
		usdhi6_sg_unmap(host, true);
		/*
		 * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped
		 * the page
		 */
		/* fall through - page unmapped in USDHI6_WAIT_FOR_DATA_END. */
	case USDHI6_WAIT_FOR_DATA_END:
		usdhi6_error_code(host);
		data->error = -ETIMEDOUT;

@@ -23,7 +23,7 @@
#define AU6601_BASE_CLOCK 31000000
#define AU6601_MIN_CLOCK 150000
#define AU6601_MAX_CLOCK 208000000
#define AU6601_MAX_DMA_SEGMENTS 1
#define AU6601_MAX_DMA_SEGMENTS 64
#define AU6601_MAX_PIO_SEGMENTS 1
#define AU6601_MAX_DMA_BLOCK_SIZE 0x1000
#define AU6601_MAX_PIO_BLOCK_SIZE 0x200