diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index e434ffe7bc5c..832cbd647145 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -2067,7 +2067,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
 err_clk_disable:
 	clk_disable_unprepare(atxdmac->clk);
 err_free_irq:
-	free_irq(atxdmac->irq, atxdmac->dma.dev);
+	free_irq(atxdmac->irq, atxdmac);
 	return ret;
 }
 
@@ -2081,7 +2081,7 @@ static int at_xdmac_remove(struct platform_device *pdev)
 	dma_async_device_unregister(&atxdmac->dma);
 	clk_disable_unprepare(atxdmac->clk);
 
-	free_irq(atxdmac->irq, atxdmac->dma.dev);
+	free_irq(atxdmac->irq, atxdmac);
 
 	for (i = 0; i < atxdmac->dma.chancnt; i++) {
 		struct at_xdmac_chan *atchan = &atxdmac->chan[i];
diff --git a/drivers/dma/fsl_raid.c b/drivers/dma/fsl_raid.c
index aad167eaaee8..de2a2a2b1d75 100644
--- a/drivers/dma/fsl_raid.c
+++ b/drivers/dma/fsl_raid.c
@@ -836,6 +836,7 @@ static int fsl_re_probe(struct platform_device *ofdev)
 		rc = of_property_read_u32(np, "reg", &off);
 		if (rc) {
 			dev_err(dev, "Reg property not found in JQ node\n");
+			of_node_put(np);
 			return -ENODEV;
 		}
 		/* Find out the Job Rings present under each JQ */
diff --git a/drivers/dma/img-mdc-dma.c b/drivers/dma/img-mdc-dma.c
index a4c53be482cf..624f1e1e9c55 100644
--- a/drivers/dma/img-mdc-dma.c
+++ b/drivers/dma/img-mdc-dma.c
@@ -861,7 +861,6 @@ static int mdc_dma_probe(struct platform_device *pdev)
 {
 	struct mdc_dma *mdma;
 	struct resource *res;
-	const struct of_device_id *match;
 	unsigned int i;
 	u32 val;
 	int ret;
@@ -871,8 +870,7 @@ static int mdc_dma_probe(struct platform_device *pdev)
 		return -ENOMEM;
 	platform_set_drvdata(pdev, mdma);
 
-	match = of_match_device(mdc_dma_of_match, &pdev->dev);
-	mdma->soc = match->data;
+	mdma->soc = of_device_get_match_data(&pdev->dev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mdma->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
index dc7850a422b8..3f56f9ca4482 100644
--- a/drivers/dma/pxa_dma.c
+++ b/drivers/dma/pxa_dma.c
@@ -638,7 +638,7 @@ static bool pxad_try_hotchain(struct virt_dma_chan *vc,
 		vd_last_issued = list_entry(vc->desc_issued.prev,
 					    struct virt_dma_desc, node);
 		pxad_desc_chain(vd_last_issued, vd);
-		if (is_chan_running(chan) || is_desc_completed(vd_last_issued))
+		if (is_chan_running(chan) || is_desc_completed(vd))
 			return true;
 	}
 
@@ -671,6 +671,7 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 	struct virt_dma_desc *vd, *tmp;
 	unsigned int dcsr;
 	unsigned long flags;
+	bool vd_completed;
 	dma_cookie_t last_started = 0;
 
 	BUG_ON(!chan);
@@ -681,15 +682,17 @@ static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
 
 	spin_lock_irqsave(&chan->vc.lock, flags);
 	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
+		vd_completed = is_desc_completed(vd);
 		dev_dbg(&chan->vc.chan.dev->device,
-			"%s(): checking txd %p[%x]: completed=%d\n",
-			__func__, vd, vd->tx.cookie, is_desc_completed(vd));
+			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
+			__func__, vd, vd->tx.cookie, vd_completed,
+			dcsr);
 		last_started = vd->tx.cookie;
 		if (to_pxad_sw_desc(vd)->cyclic) {
 			vchan_cyclic_callback(vd);
 			break;
 		}
-		if (is_desc_completed(vd)) {
+		if (vd_completed) {
 			list_del(&vd->node);
 			vchan_cookie_complete(vd);
 		} else {
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index 749f1bd5d65d..06ecdc38cee0 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -600,27 +600,30 @@ static irqreturn_t usb_dmac_isr_channel(int irq, void *dev)
 {
 	struct usb_dmac_chan *chan = dev;
 	irqreturn_t ret = IRQ_NONE;
-	u32 mask = USB_DMACHCR_TE;
-	u32 check_bits = USB_DMACHCR_TE | USB_DMACHCR_SP;
+	u32 mask = 0;
 	u32 chcr;
+	bool xfer_end = false;
 
 	spin_lock(&chan->vc.lock);
 
 	chcr = usb_dmac_chan_read(chan, USB_DMACHCR);
-	if (chcr & check_bits)
-		mask |= USB_DMACHCR_DE | check_bits;
+	if (chcr & (USB_DMACHCR_TE | USB_DMACHCR_SP)) {
+		mask |= USB_DMACHCR_DE | USB_DMACHCR_TE | USB_DMACHCR_SP;
+		if (chcr & USB_DMACHCR_DE)
+			xfer_end = true;
+		ret |= IRQ_HANDLED;
+	}
 	if (chcr & USB_DMACHCR_NULL) {
 		/* An interruption of TE will happen after we set FTE */
 		mask |= USB_DMACHCR_NULL;
 		chcr |= USB_DMACHCR_FTE;
 		ret |= IRQ_HANDLED;
 	}
-	usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
+	if (mask)
+		usb_dmac_chan_write(chan, USB_DMACHCR, chcr & ~mask);
 
-	if (chcr & check_bits) {
+	if (xfer_end)
 		usb_dmac_isr_transfer_end(chan);
-		ret |= IRQ_HANDLED;
-	}
 
 	spin_unlock(&chan->vc.lock);