forked from luck/tmp_suning_uos_patched
serial: sh-sci: Switch to dma_map_single() for DMA transmission
Simplify the DMA transmit code by using dma_map_single() instead of constantly modifying the single-entry scatterlist to match what's currently being transmitted. Signed-off-by: Geert Uytterhoeven <geert+renesas@glider.be> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
092248aa32
commit
79904420b7
@@ -109,8 +109,8 @@ struct sci_port {
|
|||||||
dma_cookie_t cookie_tx;
|
dma_cookie_t cookie_tx;
|
||||||
dma_cookie_t cookie_rx[2];
|
dma_cookie_t cookie_rx[2];
|
||||||
dma_cookie_t active_rx;
|
dma_cookie_t active_rx;
|
||||||
struct scatterlist sg_tx;
|
dma_addr_t tx_dma_addr;
|
||||||
unsigned int sg_len_tx;
|
unsigned int tx_dma_len;
|
||||||
struct scatterlist sg_rx[2];
|
struct scatterlist sg_rx[2];
|
||||||
size_t buf_len_rx;
|
size_t buf_len_rx;
|
||||||
struct sh_dmae_slave param_tx;
|
struct sh_dmae_slave param_tx;
|
||||||
@@ -1280,10 +1280,10 @@ static void sci_dma_tx_complete(void *arg)
|
|||||||
|
|
||||||
spin_lock_irqsave(&port->lock, flags);
|
spin_lock_irqsave(&port->lock, flags);
|
||||||
|
|
||||||
xmit->tail += sg_dma_len(&s->sg_tx);
|
xmit->tail += s->tx_dma_len;
|
||||||
xmit->tail &= UART_XMIT_SIZE - 1;
|
xmit->tail &= UART_XMIT_SIZE - 1;
|
||||||
|
|
||||||
port->icount.tx += sg_dma_len(&s->sg_tx);
|
port->icount.tx += s->tx_dma_len;
|
||||||
|
|
||||||
async_tx_ack(s->desc_tx);
|
async_tx_ack(s->desc_tx);
|
||||||
s->desc_tx = NULL;
|
s->desc_tx = NULL;
|
||||||
@@ -1494,7 +1494,7 @@ static void work_fn_tx(struct work_struct *work)
|
|||||||
struct dma_chan *chan = s->chan_tx;
|
struct dma_chan *chan = s->chan_tx;
|
||||||
struct uart_port *port = &s->port;
|
struct uart_port *port = &s->port;
|
||||||
struct circ_buf *xmit = &port->state->xmit;
|
struct circ_buf *xmit = &port->state->xmit;
|
||||||
struct scatterlist *sg = &s->sg_tx;
|
dma_addr_t buf;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* DMA is idle now.
|
* DMA is idle now.
|
||||||
@@ -1504,19 +1504,15 @@ static void work_fn_tx(struct work_struct *work)
|
|||||||
* consistent xmit buffer state.
|
* consistent xmit buffer state.
|
||||||
*/
|
*/
|
||||||
spin_lock_irq(&port->lock);
|
spin_lock_irq(&port->lock);
|
||||||
sg->offset = xmit->tail & (UART_XMIT_SIZE - 1);
|
buf = s->tx_dma_addr + (xmit->tail & (UART_XMIT_SIZE - 1));
|
||||||
sg_dma_address(sg) = (sg_dma_address(sg) & ~(UART_XMIT_SIZE - 1)) +
|
s->tx_dma_len = min_t(unsigned int,
|
||||||
sg->offset;
|
|
||||||
sg_dma_len(sg) = min_t(unsigned int,
|
|
||||||
CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
|
CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE),
|
||||||
CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
|
CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE));
|
||||||
spin_unlock_irq(&port->lock);
|
spin_unlock_irq(&port->lock);
|
||||||
|
|
||||||
BUG_ON(!sg_dma_len(sg));
|
desc = dmaengine_prep_slave_single(chan, buf, s->tx_dma_len,
|
||||||
|
DMA_MEM_TO_DEV,
|
||||||
desc = dmaengine_prep_slave_sg(chan,
|
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
||||||
sg, s->sg_len_tx, DMA_MEM_TO_DEV,
|
|
||||||
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
|
|
||||||
if (!desc) {
|
if (!desc) {
|
||||||
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
|
dev_warn(port->dev, "Failed preparing Tx DMA descriptor\n");
|
||||||
/* switch to PIO */
|
/* switch to PIO */
|
||||||
@@ -1524,7 +1520,8 @@ static void work_fn_tx(struct work_struct *work)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
dma_sync_sg_for_device(chan->device->dev, sg, 1, DMA_TO_DEVICE);
|
dma_sync_single_for_device(chan->device->dev, buf, s->tx_dma_len,
|
||||||
|
DMA_TO_DEVICE);
|
||||||
|
|
||||||
spin_lock_irq(&port->lock);
|
spin_lock_irq(&port->lock);
|
||||||
s->desc_tx = desc;
|
s->desc_tx = desc;
|
||||||
@@ -1680,7 +1677,6 @@ static void sci_request_dma(struct uart_port *port)
|
|||||||
struct sh_dmae_slave *param;
|
struct sh_dmae_slave *param;
|
||||||
struct dma_chan *chan;
|
struct dma_chan *chan;
|
||||||
dma_cap_mask_t mask;
|
dma_cap_mask_t mask;
|
||||||
int nent;
|
|
||||||
|
|
||||||
dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
|
dev_dbg(port->dev, "%s: port %d\n", __func__, port->line);
|
||||||
|
|
||||||
@@ -1700,27 +1696,21 @@ static void sci_request_dma(struct uart_port *port)
|
|||||||
dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
|
dev_dbg(port->dev, "%s: TX: got channel %p\n", __func__, chan);
|
||||||
if (chan) {
|
if (chan) {
|
||||||
s->chan_tx = chan;
|
s->chan_tx = chan;
|
||||||
sg_init_table(&s->sg_tx, 1);
|
|
||||||
/* UART circular tx buffer is an aligned page. */
|
/* UART circular tx buffer is an aligned page. */
|
||||||
BUG_ON((uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
|
s->tx_dma_addr = dma_map_single(chan->device->dev,
|
||||||
sg_set_page(&s->sg_tx, virt_to_page(port->state->xmit.buf),
|
port->state->xmit.buf,
|
||||||
UART_XMIT_SIZE,
|
UART_XMIT_SIZE,
|
||||||
(uintptr_t)port->state->xmit.buf & ~PAGE_MASK);
|
DMA_TO_DEVICE);
|
||||||
nent = dma_map_sg(chan->device->dev, &s->sg_tx, 1,
|
if (dma_mapping_error(chan->device->dev, s->tx_dma_addr)) {
|
||||||
DMA_TO_DEVICE);
|
|
||||||
if (!nent) {
|
|
||||||
dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
|
dev_warn(port->dev, "Failed mapping Tx DMA descriptor\n");
|
||||||
dma_release_channel(chan);
|
dma_release_channel(chan);
|
||||||
s->chan_tx = NULL;
|
s->chan_tx = NULL;
|
||||||
} else {
|
} else {
|
||||||
dev_dbg(port->dev, "%s: mapped %d@%p to %pad\n",
|
dev_dbg(port->dev, "%s: mapped %lu@%p to %pad\n",
|
||||||
__func__,
|
__func__, UART_XMIT_SIZE,
|
||||||
sg_dma_len(&s->sg_tx), port->state->xmit.buf,
|
port->state->xmit.buf, &s->tx_dma_addr);
|
||||||
&sg_dma_address(&s->sg_tx));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
s->sg_len_tx = nent;
|
|
||||||
|
|
||||||
INIT_WORK(&s->work_tx, work_fn_tx);
|
INIT_WORK(&s->work_tx, work_fn_tx);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user