blob: fdbddbc6375da50932d3ce156d4bc2643de67b20 [file] [log] [blame]
Heikki Krogerus9ee4b832013-01-10 11:25:11 +02001/*
2 * 8250_dma.c - DMA Engine API support for 8250.c
3 *
4 * Copyright (C) 2013 Intel Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11#include <linux/tty.h>
12#include <linux/tty_flip.h>
13#include <linux/serial_reg.h>
14#include <linux/dma-mapping.h>
15
16#include "8250.h"
17
18static void __dma_tx_complete(void *param)
19{
20 struct uart_8250_port *p = param;
21 struct uart_8250_dma *dma = p->dma;
22 struct circ_buf *xmit = &p->port.state->xmit;
Loic Poulainf8fd1b02014-04-24 11:34:48 +020023 unsigned long flags;
Andy Shevchenko2dc98942014-11-06 13:28:16 +020024 int ret;
Heikki Krogerus9ee4b832013-01-10 11:25:11 +020025
26 dma_sync_single_for_cpu(dma->txchan->device->dev, dma->tx_addr,
27 UART_XMIT_SIZE, DMA_TO_DEVICE);
28
Loic Poulainf8fd1b02014-04-24 11:34:48 +020029 spin_lock_irqsave(&p->port.lock, flags);
30
31 dma->tx_running = 0;
32
Heikki Krogerus9ee4b832013-01-10 11:25:11 +020033 xmit->tail += dma->tx_size;
34 xmit->tail &= UART_XMIT_SIZE - 1;
35 p->port.icount.tx += dma->tx_size;
36
37 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
38 uart_write_wakeup(&p->port);
39
Andy Shevchenko2dc98942014-11-06 13:28:16 +020040 ret = serial8250_tx_dma(p);
41 if (ret) {
Andy Shevchenko2dc98942014-11-06 13:28:16 +020042 p->ier |= UART_IER_THRI;
43 serial_port_out(&p->port, UART_IER, p->ier);
Sebastian Andrzej Siewiorb2202822014-09-29 20:06:40 +020044 }
Loic Poulainf8fd1b02014-04-24 11:34:48 +020045
46 spin_unlock_irqrestore(&p->port.lock, flags);
Heikki Krogerus9ee4b832013-01-10 11:25:11 +020047}
48
/*
 * Completion callback for an RX DMA transfer.  Also invoked directly by
 * serial8250_rx_dma_flush() after pausing the channel.
 *
 * Pushes however many bytes the engine actually wrote (rx_size minus the
 * residue reported by dmaengine_tx_status()) into the tty flip buffer.
 *
 * NOTE(review): this path takes no lock, yet serial8250_rx_dma_flush()
 * may call it from a context that holds p->port.lock while the dmaengine
 * tasklet may run it concurrently without the lock — verify that
 * dma->rx_running and the flip-buffer push cannot race between the two
 * callers.
 */
static void __dma_rx_complete(void *param)
{
	struct uart_8250_port *p = param;
	struct uart_8250_dma *dma = p->dma;
	struct tty_port *tty_port = &p->port.state->port;
	struct dma_tx_state state;
	int count;

	dma->rx_running = 0;
	dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);

	/* residue = bytes the engine did not fill in this transfer */
	count = dma->rx_size - state.residue;

	tty_insert_flip_string(tty_port, dma->rx_buf, count);
	p->port.icount.rx += count;

	tty_flip_buffer_push(tty_port);
}
67
68int serial8250_tx_dma(struct uart_8250_port *p)
69{
70 struct uart_8250_dma *dma = p->dma;
71 struct circ_buf *xmit = &p->port.state->xmit;
72 struct dma_async_tx_descriptor *desc;
Sebastian Andrzej Siewiorb2202822014-09-29 20:06:40 +020073 int ret;
Heikki Krogerus9ee4b832013-01-10 11:25:11 +020074
Heikki Krogerus5ea5b242013-04-10 16:58:24 +030075 if (uart_tx_stopped(&p->port) || dma->tx_running ||
76 uart_circ_empty(xmit))
77 return 0;
Heikki Krogerus9ee4b832013-01-10 11:25:11 +020078
79 dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
Heikki Krogerus9ee4b832013-01-10 11:25:11 +020080
81 desc = dmaengine_prep_slave_single(dma->txchan,
82 dma->tx_addr + xmit->tail,
83 dma->tx_size, DMA_MEM_TO_DEV,
84 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
Sebastian Andrzej Siewiorb2202822014-09-29 20:06:40 +020085 if (!desc) {
86 ret = -EBUSY;
87 goto err;
88 }
Heikki Krogerus9ee4b832013-01-10 11:25:11 +020089
90 dma->tx_running = 1;
Heikki Krogerus9ee4b832013-01-10 11:25:11 +020091 desc->callback = __dma_tx_complete;
92 desc->callback_param = p;
93
94 dma->tx_cookie = dmaengine_submit(desc);
95
96 dma_sync_single_for_device(dma->txchan->device->dev, dma->tx_addr,
97 UART_XMIT_SIZE, DMA_TO_DEVICE);
98
99 dma_async_issue_pending(dma->txchan);
Sebastian Andrzej Siewiorb2202822014-09-29 20:06:40 +0200100 if (dma->tx_err) {
101 dma->tx_err = 0;
102 if (p->ier & UART_IER_THRI) {
103 p->ier &= ~UART_IER_THRI;
104 serial_out(p, UART_IER, p->ier);
105 }
106 }
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200107 return 0;
Sebastian Andrzej Siewiorb2202822014-09-29 20:06:40 +0200108err:
109 dma->tx_err = 1;
110 return ret;
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200111}
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200112
Peter Hurley33d9b8b22016-04-09 22:14:36 -0700113int serial8250_rx_dma(struct uart_8250_port *p)
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200114{
115 struct uart_8250_dma *dma = p->dma;
116 struct dma_async_tx_descriptor *desc;
Heikki Krogerus75df0222013-04-10 16:58:25 +0300117
Sebastian Andrzej Siewior0fcb7902014-09-29 20:06:41 +0200118 if (dma->rx_running)
Heikki Krogerus75df0222013-04-10 16:58:25 +0300119 return 0;
120
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200121 desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr,
122 dma->rx_size, DMA_DEV_TO_MEM,
123 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
124 if (!desc)
125 return -EBUSY;
126
Sebastian Andrzej Siewior0fcb7902014-09-29 20:06:41 +0200127 dma->rx_running = 1;
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200128 desc->callback = __dma_rx_complete;
129 desc->callback_param = p;
130
131 dma->rx_cookie = dmaengine_submit(desc);
132
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200133 dma_async_issue_pending(dma->rxchan);
134
135 return 0;
136}
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200137
/*
 * Stop an in-flight RX DMA transfer and push the bytes already received
 * to the tty layer.  A no-op when RX DMA is idle.
 *
 * Order matters here: pause the channel first so the residue read in
 * __dma_rx_complete() is stable, harvest the data, then tear the
 * transfer down asynchronously.
 */
void serial8250_rx_dma_flush(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;

	if (dma->rx_running) {
		dmaengine_pause(dma->rxchan);
		__dma_rx_complete(p);
		dmaengine_terminate_async(dma->rxchan);
	}
}
EXPORT_SYMBOL_GPL(serial8250_rx_dma_flush);
Peter Hurley33d9b8b22016-04-09 22:14:36 -0700149
/*
 * Acquire and configure the RX and TX DMA channels for a port, allocate
 * the RX bounce buffer, and map the circular TX buffer for streaming
 * DMA.
 *
 * Returns 0 on success or a negative errno; on any failure every
 * resource acquired so far is released (goto cleanup chain below), so
 * no channel is left held.
 */
int serial8250_request_dma(struct uart_8250_port *p)
{
	struct uart_8250_dma *dma = p->dma;
	/* Drivers may override the FIFO register base; default: mapbase. */
	phys_addr_t rx_dma_addr = dma->rx_dma_addr ?
				  dma->rx_dma_addr : p->port.mapbase;
	phys_addr_t tx_dma_addr = dma->tx_dma_addr ?
				  dma->tx_dma_addr : p->port.mapbase;
	dma_cap_mask_t mask;
	struct dma_slave_caps caps;
	int ret;

	/* Default slave configuration parameters: byte-wide FIFO regs. */
	dma->rxconf.direction = DMA_DEV_TO_MEM;
	dma->rxconf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->rxconf.src_addr = rx_dma_addr + UART_RX;

	dma->txconf.direction = DMA_MEM_TO_DEV;
	dma->txconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
	dma->txconf.dst_addr = tx_dma_addr + UART_TX;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Get a channel for RX */
	dma->rxchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->rx_param,
						       p->port.dev, "rx");
	if (!dma->rxchan)
		return -ENODEV;

	/* 8250 rx dma requires dmaengine driver to support pause/terminate */
	ret = dma_get_slave_caps(dma->rxchan, &caps);
	if (ret)
		goto release_rx;
	/* Descriptor-granular residue is useless for partial RX harvest. */
	if (!caps.cmd_pause || !caps.cmd_terminate ||
	    caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
		ret = -EINVAL;
		goto release_rx;
	}

	dmaengine_slave_config(dma->rxchan, &dma->rxconf);

	/* Get a channel for TX */
	dma->txchan = dma_request_slave_channel_compat(mask,
						       dma->fn, dma->tx_param,
						       p->port.dev, "tx");
	if (!dma->txchan) {
		ret = -ENODEV;
		goto release_rx;
	}

	/* 8250 tx dma requires dmaengine driver to support terminate */
	ret = dma_get_slave_caps(dma->txchan, &caps);
	if (ret)
		goto err;
	if (!caps.cmd_terminate) {
		ret = -EINVAL;
		goto err;
	}

	dmaengine_slave_config(dma->txchan, &dma->txconf);

	/* RX buffer: one page unless the driver requested another size. */
	if (!dma->rx_size)
		dma->rx_size = PAGE_SIZE;

	dma->rx_buf = dma_alloc_coherent(dma->rxchan->device->dev, dma->rx_size,
					 &dma->rx_addr, GFP_KERNEL);
	if (!dma->rx_buf) {
		ret = -ENOMEM;
		goto err;
	}

	/* TX buffer: map the uart circular buffer for streaming DMA. */
	dma->tx_addr = dma_map_single(dma->txchan->device->dev,
				      p->port.state->xmit.buf,
				      UART_XMIT_SIZE,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dma->txchan->device->dev, dma->tx_addr)) {
		/* Undo the RX buffer allocation by hand, then unwind. */
		dma_free_coherent(dma->rxchan->device->dev, dma->rx_size,
				  dma->rx_buf, dma->rx_addr);
		ret = -ENOMEM;
		goto err;
	}

	dev_dbg_ratelimited(p->port.dev, "got both dma channels\n");

	return 0;
err:
	dma_release_channel(dma->txchan);
release_rx:
	dma_release_channel(dma->rxchan);
	return ret;
}
EXPORT_SYMBOL_GPL(serial8250_request_dma);
245
246void serial8250_release_dma(struct uart_8250_port *p)
247{
248 struct uart_8250_dma *dma = p->dma;
249
250 if (!dma)
251 return;
252
253 /* Release RX resources */
Andy Shevchenko8d170472016-08-17 19:20:24 +0300254 dmaengine_terminate_sync(dma->rxchan);
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200255 dma_free_coherent(dma->rxchan->device->dev, dma->rx_size, dma->rx_buf,
256 dma->rx_addr);
257 dma_release_channel(dma->rxchan);
258 dma->rxchan = NULL;
259
260 /* Release TX resources */
Andy Shevchenko8d170472016-08-17 19:20:24 +0300261 dmaengine_terminate_sync(dma->txchan);
Heikki Krogerus9ee4b832013-01-10 11:25:11 +0200262 dma_unmap_single(dma->txchan->device->dev, dma->tx_addr,
263 UART_XMIT_SIZE, DMA_TO_DEVICE);
264 dma_release_channel(dma->txchan);
265 dma->txchan = NULL;
266 dma->tx_running = 0;
267
268 dev_dbg_ratelimited(p->port.dev, "dma channels released\n");
269}
270EXPORT_SYMBOL_GPL(serial8250_release_dma);