dmaengine: shdma: restore partial transfer calculation
The recent shdma driver split has mistakenly removed support for partial DMA transfer size calculation on forced termination. This patch restores it.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Acked-by: Vinod Koul <vinod.koul@linux.intel.com>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
commit 4f46f8ac80
parent ac694dbdbc
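Background on the restored calculation (not part of the patch itself): the DMAC's TCR register counts down the remaining transfer units, each unit being 1 << xmit_shift bytes, so the amount already transferred before a forced halt follows from the difference between the programmed and the remaining count. The stand-alone sketch below only illustrates that arithmetic; partial_bytes(), its parameters, and the example values are hypothetical and not driver code.

#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative only: bytes completed before a forced stop, assuming a
 * down-counting transfer count register that works in units of
 * 1 << xmit_shift bytes.
 */
static size_t partial_bytes(unsigned int units_programmed,
			    unsigned int units_remaining,
			    unsigned int xmit_shift)
{
	/* Units no longer pending were transferred; the shift converts units to bytes. */
	return (size_t)(units_programmed - units_remaining) << xmit_shift;
}

int main(void)
{
	/* Example: 256 units of 4 bytes programmed, 100 still pending at halt -> 624 bytes done. */
	printf("%zu bytes transferred\n", partial_bytes(256, 100, 2));
	return 0;
}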
drivers/dma/sh/shdma-base.c
@@ -483,6 +483,7 @@ static struct shdma_desc *shdma_add_desc(struct shdma_chan *schan,
 	new->mark = DESC_PREPARED;
 	new->async_tx.flags = flags;
 	new->direction = direction;
+	new->partial = 0;
 
 	*len -= copy_size;
 	if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
@@ -644,6 +645,14 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	case DMA_TERMINATE_ALL:
 		spin_lock_irqsave(&schan->chan_lock, flags);
 		ops->halt_channel(schan);
+
+		if (ops->get_partial && !list_empty(&schan->ld_queue)) {
+			/* Record partial transfer */
+			struct shdma_desc *desc = list_first_entry(&schan->ld_queue,
+						struct shdma_desc, node);
+			desc->partial = ops->get_partial(schan, desc);
+		}
+
 		spin_unlock_irqrestore(&schan->chan_lock, flags);
 
 		shdma_chan_ld_cleanup(schan, true);
drivers/dma/sh/shdma.c
@@ -381,6 +381,17 @@ static bool sh_dmae_chan_irq(struct shdma_chan *schan, int irq)
 	return true;
 }
 
+static size_t sh_dmae_get_partial(struct shdma_chan *schan,
+				  struct shdma_desc *sdesc)
+{
+	struct sh_dmae_chan *sh_chan = container_of(schan, struct sh_dmae_chan,
+						    shdma_chan);
+	struct sh_dmae_desc *sh_desc = container_of(sdesc,
+					struct sh_dmae_desc, shdma_desc);
+	return (sh_desc->hw.tcr - sh_dmae_readl(sh_chan, TCR)) <<
+		sh_chan->xmit_shift;
+}
+
 /* Called from error IRQ or NMI */
 static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
@@ -632,6 +643,7 @@ static const struct shdma_ops sh_dmae_shdma_ops = {
 	.start_xfer = sh_dmae_start_xfer,
 	.embedded_desc = sh_dmae_embedded_desc,
 	.chan_irq = sh_dmae_chan_irq,
+	.get_partial = sh_dmae_get_partial,
 };
 
 static int __devinit sh_dmae_probe(struct platform_device *pdev)
include/linux/shdma-base.h
@@ -50,6 +50,7 @@ struct shdma_desc {
 	struct list_head node;
 	struct dma_async_tx_descriptor async_tx;
 	enum dma_transfer_direction direction;
+	size_t partial;
 	dma_cookie_t cookie;
 	int chunks;
 	int mark;
@@ -98,6 +99,7 @@ struct shdma_ops {
 	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
 	struct shdma_desc *(*embedded_desc)(void *, int);
 	bool (*chan_irq)(struct shdma_chan *, int);
+	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
 };
 
 struct shdma_dev {