dmaengine fixes for v5.2-rc4

The fixes for this round are in drivers:
  - jz4780 transfer fix for acking descriptors early
  - fsl-qdma: clean registers on error
  - dw-axi-dmac: null pointer dereference fix
  - mediatek-cqdma: fix sleeping in atomic context
  - tegra210-adma: fix a bunch of issues such as a crash in driver
    probe, channel FIFO configuration, etc.
  - sprd: fixes for a possible crash when getting descriptor status
    and for block length overflow; for 2-stage transfers, fix the
    incorrect start, configuration and interrupt handling
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJc+3afAAoJEHwUBw8lI4NHYe0P/RVwZso3RKbAALFg6MJLyWun
 UpIJ9NA28xcKyghXa3mbq5gFgqkwJedACaXaik/wZr23+FJodg1afy7cfZRq3l5I
 arOmc7U2HK78L+h6T6JbIRP+pxIHHlrMaVwyO2ZwQ2jJmwwV/KyXgfxmv2ZIc1cg
 0M2C22pgy7oacK47eyjfcF/yH6PWARVaUlUHB+0pXXgwpImNl9IO+ritEzJg6hA2
 boltBfHX86XB/qfbnHaqo7bGUhEY4kqQafHKDNUh+jm7GgoI5biVC7dtH7liI0es
 U3I+RXFbOi4sxaY/j5aRgCNCnc7nUdU806ma3DPuTwybeca0ixttazjtB1p56ewU
 /NkhiuWf/qShZkepGlB50l7CAJ4Q0of2tPFr8pnI9OdZl8MzcoHIhN95IT/aEByf
 f0cbwv8lAeU948zm63axs5eS36M1yoV/cUzzeLatXWY3CS82FlTiEaWGnYkfZJ9j
 DNOr7WGk81A66kNEDIq65H9qQd86kRdNgoy1f7DNxBOjzVTzXGIATM+zpb3W/tsb
 Qb3i8OD9Lo4TzWCiDqbasGYKjAwGougcRKZWvSv/qfuItq/vGnChSNs7cMx73znJ
 EfgG+/kiKEn2hj9E5eSduqOrBaL4g+AJlqb93fpH54Jjr3QaupvhCZ2bYhrb9QrU
 rW3FZfGsAcrlsPbOlIOW
 =Ca0d
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-fix-5.2-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:

 - jz4780 transfer fix for acking descriptors early

 - fsl-qdma: clean registers on error

 - dw-axi-dmac: null pointer dereference fix

 - mediatek-cqdma: fix sleeping in atomic context

 - tegra210-adma: fix a bunch of issues such as a crash in driver probe,
   channel FIFO configuration, etc.

 - sprd: fixes for a possible crash when getting descriptor status and
   for block length overflow; for 2-stage transfers, fix the incorrect
   start, configuration and interrupt handling

* tag 'dmaengine-fix-5.2-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: sprd: Add interrupt support for 2-stage transfer
  dmaengine: sprd: Fix the right place to configure 2-stage transfer
  dmaengine: sprd: Fix block length overflow
  dmaengine: sprd: Fix the incorrect start for 2-stage destination channels
  dmaengine: sprd: Add validation of current descriptor in irq handler
  dmaengine: sprd: Fix the possible crash when getting descriptor status
  dmaengine: tegra210-adma: Fix spelling
  dmaengine: tegra210-adma: Fix channel FIFO configuration
  dmaengine: tegra210-adma: Fix crash during probe
  dmaengine: mediatek-cqdma: sleeping in atomic context
  dmaengine: dw-axi-dmac: fix null dereference when pointer first is null
  dmaengine: fsl-qdma: Add improvement
  dmaengine: jz4780: Fix transfers being ACKed too soon
Commit 66b59f2b5e by Linus Torvalds, 2019-06-08 12:46:31 -07:00
6 changed files with 100 additions and 49 deletions

diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c

@@ -662,10 +662,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
 	return status;
 }
 
-static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
 		struct jz4780_dma_chan *jzchan)
 {
 	uint32_t dcs;
+	bool ack = true;
 
 	spin_lock(&jzchan->vchan.lock);
@@ -688,12 +689,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
 		if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
 			if (jzchan->desc->type == DMA_CYCLIC) {
 				vchan_cyclic_callback(&jzchan->desc->vdesc);
-			} else {
+
+				jz4780_dma_begin(jzchan);
+			} else if (dcs & JZ_DMA_DCS_TT) {
 				vchan_cookie_complete(&jzchan->desc->vdesc);
 				jzchan->desc = NULL;
-			}
 
-			jz4780_dma_begin(jzchan);
+				jz4780_dma_begin(jzchan);
+			} else {
+				/* False positive - continue the transfer */
+				ack = false;
+				jz4780_dma_chn_writel(jzdma, jzchan->id,
+						      JZ_DMA_REG_DCS,
+						      JZ_DMA_DCS_CTE);
+			}
 		}
 	} else {
 		dev_err(&jzchan->vchan.chan.dev->device,
@@ -701,21 +710,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
 	}
 
 	spin_unlock(&jzchan->vchan.lock);
+
+	return ack;
 }
 
 static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
 {
 	struct jz4780_dma_dev *jzdma = data;
+	unsigned int nb_channels = jzdma->soc_data->nb_channels;
 	uint32_t pending, dmac;
 	int i;
 
 	pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
 
-	for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
-		if (!(pending & (1<<i)))
-			continue;
-
-		jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
+	for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+		if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
+			pending &= ~BIT(i);
 	}
 
 	/* Clear halt and address error status of all channels. */
@@ -724,7 +734,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
 	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
 
 	/* Clear interrupt pending status. */
-	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+	jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
 
 	return IRQ_HANDLED;
 }
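A minimal, standalone sketch of the acknowledgement bookkeeping the jz4780 change introduces: each channel handler reports whether its interrupt may be acked, and only the acked channels are dropped from the pending mask that is finally written back to the controller, so false positives stay flagged. Names and values below are illustrative, not the driver's API.

#include <stdbool.h>
#include <stdio.h>

#define NB_CHANNELS 8

/* Pretend per-channel handler: returns true when the IRQ may be acked. */
static bool chan_irq(unsigned int id)
{
	return id != 3;			/* say channel 3 raised a false positive */
}

int main(void)
{
	unsigned int pending = 0x2a;	/* channels 1, 3 and 5 pending */
	unsigned int i;

	for (i = 0; i < NB_CHANNELS; i++) {
		if (!(pending & (1u << i)))
			continue;
		if (chan_irq(i))
			pending &= ~(1u << i);	/* acked: drop from mask */
	}

	/* Only the un-acked bit would be written back to the controller. */
	printf("still pending: 0x%02x\n", pending);	/* prints 0x08 */
	return 0;
}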

diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c

@@ -512,7 +512,8 @@ dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
 	return vchan_tx_prep(&chan->vc, &first->vd, flags);
 
 err_desc_get:
-	axi_desc_put(first);
+	if (first)
+		axi_desc_put(first);
 	return NULL;
 }
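The guard above matters because "first" can still be NULL when the error label is reached (the very first descriptor allocation may be what failed). A minimal sketch of that error-path shape, with hypothetical names rather than the dw-axi-dmac API:

#include <stdio.h>
#include <stdlib.h>

struct desc {
	char payload[32];
};

static struct desc *prep_memcpy(int fail_first)
{
	struct desc *first = NULL;

	if (fail_first)
		goto err_desc_get;	/* nothing has been allocated yet */

	first = malloc(sizeof(*first));
	if (!first)
		goto err_desc_get;

	return first;

err_desc_get:
	if (first)			/* the guard the fix adds */
		free(first);
	return NULL;
}

int main(void)
{
	struct desc *d = prep_memcpy(0);

	printf("failing path returns %p\n", (void *)prep_memcpy(1));
	free(d);
	return 0;
}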

diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c

@@ -701,10 +701,8 @@ static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
 	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);
 
-	if (intr) {
+	if (intr)
 		dev_err(fsl_qdma->dma_dev.dev, "DMA transaction error!\n");
-		return IRQ_NONE;
-	}
 
 	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
 	return IRQ_HANDLED;
 }

diff --git a/drivers/dma/mediatek/mtk-cqdma.c b/drivers/dma/mediatek/mtk-cqdma.c

@@ -225,7 +225,7 @@ static int mtk_cqdma_hard_reset(struct mtk_cqdma_pchan *pc)
 	mtk_dma_set(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
 	mtk_dma_clr(pc, MTK_CQDMA_RESET, MTK_CQDMA_HARD_RST_BIT);
 
-	return mtk_cqdma_poll_engine_done(pc, false);
+	return mtk_cqdma_poll_engine_done(pc, true);
 }
 
 static void mtk_cqdma_start(struct mtk_cqdma_pchan *pc,
@@ -671,7 +671,7 @@ static void mtk_cqdma_free_chan_resources(struct dma_chan *c)
 		mtk_dma_set(cvc->pc, MTK_CQDMA_FLUSH, MTK_CQDMA_FLUSH_BIT);
 
 		/* wait for the completion of flush operation */
-		if (mtk_cqdma_poll_engine_done(cvc->pc, false) < 0)
+		if (mtk_cqdma_poll_engine_done(cvc->pc, true) < 0)
 			dev_err(cqdma2dev(to_cqdma_dev(c)), "cqdma flush timeout\n");
 
 		/* clear the flush bit and interrupt flag */
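The flipped argument switches these call sites to the poll variant that is safe in atomic context: they can run with a spinlock held, so the helper must busy-wait rather than sleep. A small user-space sketch of that rule, with illustrative names (in the kernel the two paths roughly correspond to udelay() vs. usleep_range()):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static volatile bool engine_busy;

static int poll_engine_done(bool atomic)
{
	int retries = 100000;

	while (engine_busy && --retries) {
		if (atomic)
			continue;	/* busy-wait: no sleeping allowed */

		/* non-atomic callers may yield the CPU between polls */
		struct timespec ts = { 0, 1000 };
		nanosleep(&ts, NULL);
	}

	return engine_busy ? -1 : 0;
}

int main(void)
{
	engine_busy = false;	/* pretend the flush completed immediately */
	printf("atomic poll: %d, sleeping poll: %d\n",
	       poll_engine_done(true), poll_engine_done(false));
	return 0;
}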

diff --git a/drivers/dma/sprd-dma.c b/drivers/dma/sprd-dma.c

@@ -62,6 +62,8 @@
 /* SPRD_DMA_GLB_2STAGE_GRP register definition */
 #define SPRD_DMA_GLB_2STAGE_EN		BIT(24)
 #define SPRD_DMA_GLB_CHN_INT_MASK	GENMASK(23, 20)
+#define SPRD_DMA_GLB_DEST_INT		BIT(22)
+#define SPRD_DMA_GLB_SRC_INT		BIT(20)
 #define SPRD_DMA_GLB_LIST_DONE_TRG	BIT(19)
 #define SPRD_DMA_GLB_TRANS_DONE_TRG	BIT(18)
 #define SPRD_DMA_GLB_BLOCK_DONE_TRG	BIT(17)
@@ -135,6 +137,7 @@
 /* define DMA channel mode & trigger mode mask */
 #define SPRD_DMA_CHN_MODE_MASK		GENMASK(7, 0)
 #define SPRD_DMA_TRG_MODE_MASK		GENMASK(7, 0)
+#define SPRD_DMA_INT_TYPE_MASK		GENMASK(7, 0)
 
 /* define the DMA transfer step type */
 #define SPRD_DMA_NONE_STEP		0
@@ -190,6 +193,7 @@ struct sprd_dma_chn {
 	u32			dev_id;
 	enum sprd_dma_chn_mode	chn_mode;
 	enum sprd_dma_trg_mode	trg_mode;
+	enum sprd_dma_int_type	int_type;
 	struct sprd_dma_desc	*cur_desc;
 };
 
@@ -429,6 +433,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
 		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
 		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
 		val |= SPRD_DMA_GLB_2STAGE_EN;
+		if (schan->int_type != SPRD_DMA_NO_INT)
+			val |= SPRD_DMA_GLB_SRC_INT;
+
 		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
 		break;
@@ -436,6 +443,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
 		val = chn & SPRD_DMA_GLB_SRC_CHN_MASK;
 		val |= BIT(schan->trg_mode - 1) << SPRD_DMA_GLB_TRG_OFFSET;
 		val |= SPRD_DMA_GLB_2STAGE_EN;
+		if (schan->int_type != SPRD_DMA_NO_INT)
+			val |= SPRD_DMA_GLB_SRC_INT;
+
 		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
 		break;
@@ -443,6 +453,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
 		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
 			SPRD_DMA_GLB_DEST_CHN_MASK;
 		val |= SPRD_DMA_GLB_2STAGE_EN;
+		if (schan->int_type != SPRD_DMA_NO_INT)
+			val |= SPRD_DMA_GLB_DEST_INT;
+
 		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP1, val, val);
 		break;
@@ -450,6 +463,9 @@ static int sprd_dma_set_2stage_config(struct sprd_dma_chn *schan)
 		val = (chn << SPRD_DMA_GLB_DEST_CHN_OFFSET) &
 			SPRD_DMA_GLB_DEST_CHN_MASK;
 		val |= SPRD_DMA_GLB_2STAGE_EN;
+		if (schan->int_type != SPRD_DMA_NO_INT)
+			val |= SPRD_DMA_GLB_DEST_INT;
+
 		sprd_dma_glb_update(sdev, SPRD_DMA_GLB_2STAGE_GRP2, val, val);
 		break;
@@ -510,7 +526,9 @@ static void sprd_dma_start(struct sprd_dma_chn *schan)
 	sprd_dma_set_uid(schan);
 	sprd_dma_enable_chn(schan);
 
-	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID)
+	if (schan->dev_id == SPRD_DMA_SOFTWARE_UID &&
+	    schan->chn_mode != SPRD_DMA_DST_CHN0 &&
+	    schan->chn_mode != SPRD_DMA_DST_CHN1)
 		sprd_dma_soft_request(schan);
 }
 
@@ -552,12 +570,17 @@ static irqreturn_t dma_irq_handle(int irq, void *dev_id)
 		schan = &sdev->channels[i];
 
 		spin_lock(&schan->vc.lock);
+
+		sdesc = schan->cur_desc;
+		if (!sdesc) {
+			spin_unlock(&schan->vc.lock);
+			return IRQ_HANDLED;
+		}
+
 		int_type = sprd_dma_get_int_type(schan);
 		req_type = sprd_dma_get_req_type(schan);
 		sprd_dma_clear_int(schan);
 
-		sdesc = schan->cur_desc;
-
 		/* cyclic mode schedule callback */
 		cyclic = schan->linklist.phy_addr ? true : false;
 		if (cyclic == true) {
@@ -625,7 +648,7 @@ static enum dma_status sprd_dma_tx_status(struct dma_chan *chan,
 		else
 			pos = 0;
 	} else if (schan->cur_desc && schan->cur_desc->vd.tx.cookie == cookie) {
-		struct sprd_dma_desc *sdesc = to_sprd_dma_desc(vd);
+		struct sprd_dma_desc *sdesc = schan->cur_desc;
 
 		if (sdesc->dir == DMA_DEV_TO_MEM)
 			pos = sprd_dma_get_dst_addr(schan);
@@ -771,7 +794,7 @@ static int sprd_dma_fill_desc(struct dma_chan *chan,
 	temp |= slave_cfg->src_maxburst & SPRD_DMA_FRG_LEN_MASK;
 	hw->frg_len = temp;
 
-	hw->blk_len = len & SPRD_DMA_BLK_LEN_MASK;
+	hw->blk_len = slave_cfg->src_maxburst & SPRD_DMA_BLK_LEN_MASK;
 	hw->trsc_len = len & SPRD_DMA_TRSC_LEN_MASK;
 
 	temp = (dst_step & SPRD_DMA_TRSF_STEP_MASK) << SPRD_DMA_DEST_TRSF_STEP_OFFSET;
@@ -904,6 +927,16 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		schan->linklist.virt_addr = 0;
 	}
 
+	/*
+	 * Set channel mode, interrupt mode and trigger mode for 2-stage
+	 * transfer.
+	 */
+	schan->chn_mode =
+		(flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
+	schan->trg_mode =
+		(flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
+	schan->int_type = flags & SPRD_DMA_INT_TYPE_MASK;
+
 	sdesc = kzalloc(sizeof(*sdesc), GFP_NOWAIT);
 	if (!sdesc)
 		return NULL;
@@ -937,12 +970,6 @@ sprd_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		}
 	}
 
-	/* Set channel mode and trigger mode for 2-stage transfer */
-	schan->chn_mode =
-		(flags >> SPRD_DMA_CHN_MODE_SHIFT) & SPRD_DMA_CHN_MODE_MASK;
-	schan->trg_mode =
-		(flags >> SPRD_DMA_TRG_MODE_SHIFT) & SPRD_DMA_TRG_MODE_MASK;
-
 	ret = sprd_dma_fill_desc(chan, &sdesc->chn_hw, 0, 0, src, dst, len,
 				 dir, flags, slave_cfg);
 	if (ret) {
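Most of the sprd hunks revolve around the per-transfer state that sprd_dma_prep_slave_sg() now decodes from the dmaengine flags word: channel mode, trigger mode and, newly, the interrupt type that decides whether SPRD_DMA_GLB_SRC_INT/DEST_INT gets set. A standalone sketch of that packing and unpacking, using placeholder shift values rather than the real SPRD_DMA_*_SHIFT constants from include/linux/dma/sprd-dma.h:

#include <stdio.h>

#define CHN_MODE_SHIFT	24	/* placeholder */
#define TRG_MODE_SHIFT	16	/* placeholder */
#define FIELD_MASK	0xffu	/* GENMASK(7, 0) */

int main(void)
{
	/* Pack: destination channel mode 2, trigger mode 1, int type 3. */
	unsigned long flags = (2ul << CHN_MODE_SHIFT) |
			      (1ul << TRG_MODE_SHIFT) | 3ul;

	/* Unpack the same way the driver does after the change. */
	unsigned int chn_mode = (flags >> CHN_MODE_SHIFT) & FIELD_MASK;
	unsigned int trg_mode = (flags >> TRG_MODE_SHIFT) & FIELD_MASK;
	unsigned int int_type = flags & FIELD_MASK;

	printf("chn_mode=%u trg_mode=%u int_type=%u\n",
	       chn_mode, trg_mode, int_type);
	return 0;
}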

diff --git a/drivers/dma/tegra210-adma.c b/drivers/dma/tegra210-adma.c

@@ -42,10 +42,14 @@
 #define ADMA_CH_CONFIG_MAX_BUFS			8
 
 #define ADMA_CH_FIFO_CTRL			0x2c
-#define ADMA_CH_FIFO_CTRL_OVRFW_THRES(val)	(((val) & 0xf) << 24)
-#define ADMA_CH_FIFO_CTRL_STARV_THRES(val)	(((val) & 0xf) << 16)
-#define ADMA_CH_FIFO_CTRL_TX_FIFO_SIZE_SHIFT	8
-#define ADMA_CH_FIFO_CTRL_RX_FIFO_SIZE_SHIFT	0
+#define TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(val)	(((val) & 0xf) << 24)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(val)	(((val) & 0xf) << 16)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(val)		(((val) & 0xf) << 8)
+#define TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(val)		((val) & 0xf)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(val)	(((val) & 0x1f) << 24)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(val)	(((val) & 0x1f) << 16)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(val)		(((val) & 0x1f) << 8)
+#define TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(val)		((val) & 0x1f)
 
 #define ADMA_CH_LOWER_SRC_ADDR			0x34
 #define ADMA_CH_LOWER_TRG_ADDR			0x3c
@@ -60,8 +64,15 @@
 #define TEGRA_ADMA_BURST_COMPLETE_TIME		20
 
-#define ADMA_CH_FIFO_CTRL_DEFAULT	(ADMA_CH_FIFO_CTRL_OVRFW_THRES(1) | \
-					 ADMA_CH_FIFO_CTRL_STARV_THRES(1))
+#define TEGRA210_FIFO_CTRL_DEFAULT	(TEGRA210_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
+					 TEGRA210_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
+					 TEGRA210_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+					 TEGRA210_ADMA_CH_FIFO_CTRL_RXSIZE(3))
+
+#define TEGRA186_FIFO_CTRL_DEFAULT	(TEGRA186_ADMA_CH_FIFO_CTRL_OFLWTHRES(1) | \
+					 TEGRA186_ADMA_CH_FIFO_CTRL_STRVTHRES(1) | \
+					 TEGRA186_ADMA_CH_FIFO_CTRL_TXSIZE(3) | \
+					 TEGRA186_ADMA_CH_FIFO_CTRL_RXSIZE(3))
 
 #define ADMA_CH_REG_FIELD_VAL(val, mask, shift)	(((val) & mask) << shift)
@@ -73,7 +84,8 @@ struct tegra_adma;
  * @global_int_clear: Register offset of DMA global interrupt clear.
  * @ch_req_tx_shift: Register offset for AHUB transmit channel select.
  * @ch_req_rx_shift: Register offset for AHUB receive channel select.
- * @ch_base_offset: Reister offset of DMA channel registers.
+ * @ch_base_offset: Register offset of DMA channel registers.
+ * @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
  * @ch_req_mask: Mask for Tx or Rx channel select.
  * @ch_req_max: Maximum number of Tx or Rx channels available.
  * @ch_reg_size: Size of DMA channel register space.
@@ -86,6 +98,7 @@ struct tegra_adma_chip_data {
 	unsigned int ch_req_tx_shift;
 	unsigned int ch_req_rx_shift;
 	unsigned int ch_base_offset;
+	unsigned int ch_fifo_ctrl;
 	unsigned int ch_req_mask;
 	unsigned int ch_req_max;
 	unsigned int ch_reg_size;
@@ -589,7 +602,7 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
 			  ADMA_CH_CTRL_FLOWCTRL_EN;
 	ch_regs->config |= cdata->adma_get_burst_config(burst_size);
 	ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
-	ch_regs->fifo_ctrl = ADMA_CH_FIFO_CTRL_DEFAULT;
+	ch_regs->fifo_ctrl = cdata->ch_fifo_ctrl;
 	ch_regs->tc = desc->period_len & ADMA_CH_TC_COUNT_MASK;
 
 	return tegra_adma_request_alloc(tdc, direction);
@@ -773,6 +786,7 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
 	.ch_req_tx_shift	= 28,
 	.ch_req_rx_shift	= 24,
 	.ch_base_offset		= 0,
+	.ch_fifo_ctrl		= TEGRA210_FIFO_CTRL_DEFAULT,
 	.ch_req_mask		= 0xf,
 	.ch_req_max		= 10,
 	.ch_reg_size		= 0x80,
@@ -786,6 +800,7 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
 	.ch_req_tx_shift	= 27,
 	.ch_req_rx_shift	= 22,
 	.ch_base_offset		= 0x10000,
+	.ch_fifo_ctrl		= TEGRA186_FIFO_CTRL_DEFAULT,
 	.ch_req_mask		= 0x1f,
 	.ch_req_max		= 20,
 	.ch_reg_size		= 0x100,
@@ -834,16 +849,6 @@ static int tegra_adma_probe(struct platform_device *pdev)
 		return PTR_ERR(tdma->ahub_clk);
 	}
 
-	pm_runtime_enable(&pdev->dev);
-
-	ret = pm_runtime_get_sync(&pdev->dev);
-	if (ret < 0)
-		goto rpm_disable;
-
-	ret = tegra_adma_init(tdma);
-	if (ret)
-		goto rpm_put;
-
 	INIT_LIST_HEAD(&tdma->dma_dev.channels);
 	for (i = 0; i < tdma->nr_channels; i++) {
 		struct tegra_adma_chan *tdc = &tdma->channels[i];
@@ -862,6 +867,16 @@ static int tegra_adma_probe(struct platform_device *pdev)
 		tdc->tdma = tdma;
 	}
 
+	pm_runtime_enable(&pdev->dev);
+
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0)
+		goto rpm_disable;
+
+	ret = tegra_adma_init(tdma);
+	if (ret)
+		goto rpm_put;
+
 	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);
@@ -905,13 +920,13 @@ static int tegra_adma_probe(struct platform_device *pdev)
 
 dma_remove:
 	dma_async_device_unregister(&tdma->dma_dev);
-irq_dispose:
-	while (--i >= 0)
-		irq_dispose_mapping(tdma->channels[i].irq);
 rpm_put:
 	pm_runtime_put_sync(&pdev->dev);
 rpm_disable:
 	pm_runtime_disable(&pdev->dev);
+irq_dispose:
+	while (--i >= 0)
+		irq_dispose_mapping(tdma->channels[i].irq);
 
 	return ret;
 }
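The FIFO part of the tegra210-adma changes comes down to field widths: Tegra210 packs 4-bit threshold/size fields into ADMA_CH_FIFO_CTRL while Tegra186 uses 5-bit fields, so a shared default can silently truncate values; the default now lives in per-SoC chip data. A standalone sketch of the packing, mirroring the shifts, masks and values used by the new macros (the helper name is illustrative):

#include <stdio.h>

static unsigned int fifo_ctrl(unsigned int mask, unsigned int ovrfw,
			      unsigned int starv, unsigned int tx,
			      unsigned int rx)
{
	return ((ovrfw & mask) << 24) | ((starv & mask) << 16) |
	       ((tx & mask) << 8) | (rx & mask);
}

int main(void)
{
	/* The new defaults: thresholds of 1, TX/RX FIFO size of 3. */
	printf("tegra210 default: 0x%08x\n", fifo_ctrl(0x0f, 1, 1, 3, 3));
	printf("tegra186 default: 0x%08x\n", fifo_ctrl(0x1f, 1, 1, 3, 3));

	/* A size of 16 only fits in the wider Tegra186 fields. */
	printf("size 16, tegra210 mask: 0x%08x\n", fifo_ctrl(0x0f, 1, 1, 16, 16));
	printf("size 16, tegra186 mask: 0x%08x\n", fifo_ctrl(0x1f, 1, 1, 16, 16));
	return 0;
}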