blob: aee85238ccfcd12c547b25303acd6afc4135e71d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * Driver for AMBA serial ports
3 *
4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
5 *
6 * Copyright 1999 ARM Limited
7 * Copyright (C) 2000 Deep Blue Solutions Ltd.
Russell King68b65f72010-12-22 17:24:39 +00008 * Copyright (C) 2010 ST-Ericsson SA
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 * This is a generic driver for ARM AMBA-type serial ports. They
25 * have a lot of 16550-like features, but are not register compatible.
26 * Note that although they do have CTS, DCD and DSR inputs, they do
27 * not have an RI input, nor do they have DTR or RTS outputs. If
28 * required, these have to be supplied via some other means (eg, GPIO)
29 * and hooked into this driver.
30 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
32#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
33#define SUPPORT_SYSRQ
34#endif
35
36#include <linux/module.h>
37#include <linux/ioport.h>
38#include <linux/init.h>
39#include <linux/console.h>
40#include <linux/sysrq.h>
41#include <linux/device.h>
42#include <linux/tty.h>
43#include <linux/tty_flip.h>
44#include <linux/serial_core.h>
45#include <linux/serial.h>
Russell Kinga62c80e2006-01-07 13:52:45 +000046#include <linux/amba/bus.h>
47#include <linux/amba/serial.h>
Russell Kingf8ce2542006-01-07 16:15:52 +000048#include <linux/clk.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090049#include <linux/slab.h>
Russell King68b65f72010-12-22 17:24:39 +000050#include <linux/dmaengine.h>
51#include <linux/dma-mapping.h>
52#include <linux/scatterlist.h>
Shreshtha Kumar Sahuc16d51a2011-06-13 10:11:33 +020053#include <linux/delay.h>
Viresh Kumar258aea72012-02-01 16:12:19 +053054#include <linux/types.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070055
56#include <asm/io.h>
Russell Kingc6b8fda2005-10-28 14:05:16 +010057#include <asm/sizes.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070058
59#define UART_NR 14
60
61#define SERIAL_AMBA_MAJOR 204
62#define SERIAL_AMBA_MINOR 64
63#define SERIAL_AMBA_NR UART_NR
64
65#define AMBA_ISR_PASS_LIMIT 256
66
Russell Kingb63d4f02005-11-19 11:10:35 +000067#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
68#define UART_DUMMY_DR_RX (1 << 16)
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
Alessandro Rubini5926a292009-06-04 17:43:04 +010070/* There is by now at least one vendor with differing details, so handle it */
/*
 * Per-implementation register layout and feature quirks.  One instance
 * exists per known PL011 variant (vendor_arm / vendor_st below) and is
 * referenced through uart_amba_port::vendor.
 */
struct vendor_data {
	unsigned int		ifls;		/* value programmed into the IFLS FIFO-trigger register */
	unsigned int		fifosize;	/* hardware FIFO depth in bytes (16 ARM, 64 ST) */
	unsigned int		lcrh_tx;	/* register offset of the TX line-control (LCRH) register */
	unsigned int		lcrh_rx;	/* register offset of the RX line-control (LCRH) register */
	bool			oversampling;	/* variant supports the non-standard oversampling mode */
	bool			interrupt_may_hang;   /* vendor-specific */
	bool			dma_threshold;	/* has the ST DMA watermark register (see pl011_dma_startup) */
	bool			cts_event_workaround;	/* needs the ST CTS-event errata workaround */
};
81
/* Standard ARM PL011: 16-byte FIFOs, a single shared LCRH register. */
static struct vendor_data vendor_arm = {
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.fifosize		= 16,
	.lcrh_tx		= UART011_LCRH,
	.lcrh_rx		= UART011_LCRH,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
};
91
/*
 * ST-Ericsson derivative: deeper (64-byte) FIFOs, split TX/RX LCRH
 * registers, and several extra quirks enabled.
 */
static struct vendor_data vendor_st = {
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.fifosize		= 64,
	.lcrh_tx		= ST_UART011_LCRH_TX,
	.lcrh_rx		= ST_UART011_LCRH_RX,
	.oversampling		= true,
	.interrupt_may_hang	= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
};
102
Shreshtha Kumar Sahuc16d51a2011-06-13 10:11:33 +0200103static struct uart_amba_port *amba_ports[UART_NR];
104
Russell King68b65f72010-12-22 17:24:39 +0000105/* Deals with DMA transactions */
Linus Walleijead76f32011-02-24 13:21:08 +0100106
/* One RX DMA buffer: backing storage plus its single-entry scatterlist. */
struct pl011_sgbuf {
	struct scatterlist sg;	/* maps buf for the DMA engine (see pl011_sgbuf_init) */
	char *buf;		/* PL011_DMA_BUFFER_SIZE bytes, kmalloc'd */
};
111
/* RX DMA state: two ping-pong buffers alternately handed to the engine. */
struct pl011_dmarx_data {
	struct dma_chan		*chan;		/* RX channel, NULL if not acquired */
	struct completion	complete;
	bool			use_buf_b;	/* which ping-pong buffer is currently in flight */
	struct pl011_sgbuf	sgbuf_a;
	struct pl011_sgbuf	sgbuf_b;
	dma_cookie_t		cookie;		/* cookie of the in-flight RX descriptor */
	bool			running;	/* an RX DMA job is currently queued */
};
121
Russell King68b65f72010-12-22 17:24:39 +0000122struct pl011_dmatx_data {
123 struct dma_chan *chan;
124 struct scatterlist sg;
125 char *buf;
126 bool queued;
127};
128
Russell Kingc19f12b2010-12-22 17:48:26 +0000129/*
130 * We wrap our port structure around the generic uart_port.
131 */
132struct uart_amba_port {
133 struct uart_port port;
134 struct clk *clk;
135 const struct vendor_data *vendor;
Russell King68b65f72010-12-22 17:24:39 +0000136 unsigned int dmacr; /* dma control reg */
Russell Kingc19f12b2010-12-22 17:48:26 +0000137 unsigned int im; /* interrupt mask */
138 unsigned int old_status;
Russell Kingffca2b12010-12-22 17:13:05 +0000139 unsigned int fifosize; /* vendor-specific */
Russell Kingc19f12b2010-12-22 17:48:26 +0000140 unsigned int lcrh_tx; /* vendor-specific */
141 unsigned int lcrh_rx; /* vendor-specific */
Shreshtha Kumar Sahud8d8ffa2012-01-18 15:53:59 +0530142 unsigned int old_cr; /* state during shutdown */
Russell Kingc19f12b2010-12-22 17:48:26 +0000143 bool autorts;
144 char type[12];
Shreshtha Kumar Sahuc16d51a2011-06-13 10:11:33 +0200145 bool interrupt_may_hang; /* vendor-specific */
Russell King68b65f72010-12-22 17:24:39 +0000146#ifdef CONFIG_DMA_ENGINE
147 /* DMA stuff */
Linus Walleijead76f32011-02-24 13:21:08 +0100148 bool using_tx_dma;
149 bool using_rx_dma;
150 struct pl011_dmarx_data dmarx;
Russell King68b65f72010-12-22 17:24:39 +0000151 struct pl011_dmatx_data dmatx;
152#endif
Russell Kingc19f12b2010-12-22 17:48:26 +0000153};
154
Russell King68b65f72010-12-22 17:24:39 +0000155/*
Linus Walleij29772c42011-02-24 13:21:36 +0100156 * Reads up to 256 characters from the FIFO or until it's empty and
157 * inserts them into the TTY layer. Returns the number of characters
158 * read from the FIFO.
159 */
160static int pl011_fifo_to_tty(struct uart_amba_port *uap)
161{
162 u16 status, ch;
163 unsigned int flag, max_count = 256;
164 int fifotaken = 0;
165
166 while (max_count--) {
167 status = readw(uap->port.membase + UART01x_FR);
168 if (status & UART01x_FR_RXFE)
169 break;
170
171 /* Take chars from the FIFO and update status */
172 ch = readw(uap->port.membase + UART01x_DR) |
173 UART_DUMMY_DR_RX;
174 flag = TTY_NORMAL;
175 uap->port.icount.rx++;
176 fifotaken++;
177
178 if (unlikely(ch & UART_DR_ERROR)) {
179 if (ch & UART011_DR_BE) {
180 ch &= ~(UART011_DR_FE | UART011_DR_PE);
181 uap->port.icount.brk++;
182 if (uart_handle_break(&uap->port))
183 continue;
184 } else if (ch & UART011_DR_PE)
185 uap->port.icount.parity++;
186 else if (ch & UART011_DR_FE)
187 uap->port.icount.frame++;
188 if (ch & UART011_DR_OE)
189 uap->port.icount.overrun++;
190
191 ch &= uap->port.read_status_mask;
192
193 if (ch & UART011_DR_BE)
194 flag = TTY_BREAK;
195 else if (ch & UART011_DR_PE)
196 flag = TTY_PARITY;
197 else if (ch & UART011_DR_FE)
198 flag = TTY_FRAME;
199 }
200
201 if (uart_handle_sysrq_char(&uap->port, ch & 255))
202 continue;
203
204 uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
205 }
206
207 return fifotaken;
208}
209
210
211/*
Russell King68b65f72010-12-22 17:24:39 +0000212 * All the DMA operation mode stuff goes inside this ifdef.
213 * This assumes that you have a generic DMA device interface,
214 * no custom DMA interfaces are supported.
215 */
216#ifdef CONFIG_DMA_ENGINE
217
218#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
219
Linus Walleijead76f32011-02-24 13:21:08 +0100220static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
221 enum dma_data_direction dir)
222{
223 sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
224 if (!sg->buf)
225 return -ENOMEM;
226
227 sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
228
229 if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
230 kfree(sg->buf);
231 return -EINVAL;
232 }
233 return 0;
234}
235
236static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
237 enum dma_data_direction dir)
238{
239 if (sg->buf) {
240 dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
241 kfree(sg->buf);
242 }
243}
244
Russell King68b65f72010-12-22 17:24:39 +0000245static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
246{
247 /* DMA is the sole user of the platform data right now */
248 struct amba_pl011_data *plat = uap->port.dev->platform_data;
249 struct dma_slave_config tx_conf = {
250 .dst_addr = uap->port.mapbase + UART01x_DR,
251 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
Vinod Koula485df42011-10-14 10:47:38 +0530252 .direction = DMA_MEM_TO_DEV,
Russell King68b65f72010-12-22 17:24:39 +0000253 .dst_maxburst = uap->fifosize >> 1,
Viresh Kumar258aea72012-02-01 16:12:19 +0530254 .device_fc = false,
Russell King68b65f72010-12-22 17:24:39 +0000255 };
256 struct dma_chan *chan;
257 dma_cap_mask_t mask;
258
259 /* We need platform data */
260 if (!plat || !plat->dma_filter) {
261 dev_info(uap->port.dev, "no DMA platform data\n");
262 return;
263 }
264
Linus Walleijead76f32011-02-24 13:21:08 +0100265 /* Try to acquire a generic DMA engine slave TX channel */
Russell King68b65f72010-12-22 17:24:39 +0000266 dma_cap_zero(mask);
267 dma_cap_set(DMA_SLAVE, mask);
268
269 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
270 if (!chan) {
271 dev_err(uap->port.dev, "no TX DMA channel!\n");
272 return;
273 }
274
275 dmaengine_slave_config(chan, &tx_conf);
276 uap->dmatx.chan = chan;
277
278 dev_info(uap->port.dev, "DMA channel TX %s\n",
279 dma_chan_name(uap->dmatx.chan));
Linus Walleijead76f32011-02-24 13:21:08 +0100280
281 /* Optionally make use of an RX channel as well */
282 if (plat->dma_rx_param) {
283 struct dma_slave_config rx_conf = {
284 .src_addr = uap->port.mapbase + UART01x_DR,
285 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
Vinod Koula485df42011-10-14 10:47:38 +0530286 .direction = DMA_DEV_TO_MEM,
Linus Walleijead76f32011-02-24 13:21:08 +0100287 .src_maxburst = uap->fifosize >> 1,
Viresh Kumar258aea72012-02-01 16:12:19 +0530288 .device_fc = false,
Linus Walleijead76f32011-02-24 13:21:08 +0100289 };
290
291 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
292 if (!chan) {
293 dev_err(uap->port.dev, "no RX DMA channel!\n");
294 return;
295 }
296
297 dmaengine_slave_config(chan, &rx_conf);
298 uap->dmarx.chan = chan;
299
300 dev_info(uap->port.dev, "DMA channel RX %s\n",
301 dma_chan_name(uap->dmarx.chan));
302 }
Russell King68b65f72010-12-22 17:24:39 +0000303}
304
#ifndef MODULE
/*
 * Stack up the UARTs and let the above initcall be done at device
 * initcall time, because the serial driver is called as an arch
 * initcall, and at this time the DMA subsystem is not yet registered.
 * At this point the driver will switch over to using DMA where desired.
 */
struct dma_uap {
	struct list_head node;		/* link on pl011_dma_uarts */
	struct uart_amba_port *uap;	/* port awaiting DMA probe */
};

/* Ports queued for deferred DMA channel acquisition */
static LIST_HEAD(pl011_dma_uarts);

/*
 * Runs once at device-initcall time: probe DMA for every queued port,
 * emptying and freeing the list as we go.
 */
static int __init pl011_dma_initcall(void)
{
	struct list_head *node, *tmp;

	list_for_each_safe(node, tmp, &pl011_dma_uarts) {
		struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
		pl011_dma_probe_initcall(dmau->uap);
		list_del(node);
		kfree(dmau);
	}
	return 0;
}

device_initcall(pl011_dma_initcall);

/*
 * Built-in case: just queue the port; the real probe happens in the
 * initcall above.  If the tiny node allocation fails the port silently
 * stays in PIO mode.
 */
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
	if (dmau) {
		dmau->uap = uap;
		list_add_tail(&dmau->node, &pl011_dma_uarts);
	}
}
#else
/* Modular case: the DMA subsystem is already up, so probe immediately. */
static void pl011_dma_probe(struct uart_amba_port *uap)
{
	pl011_dma_probe_initcall(uap);
}
#endif
348
349static void pl011_dma_remove(struct uart_amba_port *uap)
350{
351 /* TODO: remove the initcall if it has not yet executed */
352 if (uap->dmatx.chan)
353 dma_release_channel(uap->dmatx.chan);
Linus Walleijead76f32011-02-24 13:21:08 +0100354 if (uap->dmarx.chan)
355 dma_release_channel(uap->dmarx.chan);
Russell King68b65f72010-12-22 17:24:39 +0000356}
357
Russell King68b65f72010-12-22 17:24:39 +0000358/* Forward declare this for the refill routine */
359static int pl011_dma_tx_refill(struct uart_amba_port *uap);
360
/*
 * The current DMA TX buffer has been sent.
 * Try to queue up another DMA buffer.
 *
 * Called from the dmaengine completion path, so it must take the port
 * lock itself (unlike the other TX helpers which are called with it
 * held).
 */
static void pl011_dma_tx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	unsigned long flags;
	u16 dmacr;

	spin_lock_irqsave(&uap->port.lock, flags);
	if (uap->dmatx.queued)
		dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
			     DMA_TO_DEVICE);

	/* Snapshot dmacr before clearing TXDMAE so we can test it below */
	dmacr = uap->dmacr;
	uap->dmacr = dmacr & ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * If TX DMA was disabled, it means that we've stopped the DMA for
	 * some reason (eg, XOFF received, or we want to send an X-char.)
	 *
	 * Note: we need to be careful here of a potential race between DMA
	 * and the rest of the driver - if the driver disables TX DMA while
	 * a TX buffer completing, we must update the tx queued status to
	 * get further refills (hence we check dmacr).
	 */
	if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
	    uart_circ_empty(&uap->port.state->xmit)) {
		uap->dmatx.queued = false;
		spin_unlock_irqrestore(&uap->port.lock, flags);
		return;
	}

	if (pl011_dma_tx_refill(uap) <= 0) {
		/*
		 * We didn't queue a DMA buffer for some reason, but we
		 * have data pending to be sent.  Re-enable the TX IRQ.
		 */
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
407
408/*
409 * Try to refill the TX DMA buffer.
410 * Locking: called with port lock held and IRQs disabled.
411 * Returns:
412 * 1 if we queued up a TX DMA buffer.
413 * 0 if we didn't want to handle this by DMA
414 * <0 on error
415 */
416static int pl011_dma_tx_refill(struct uart_amba_port *uap)
417{
418 struct pl011_dmatx_data *dmatx = &uap->dmatx;
419 struct dma_chan *chan = dmatx->chan;
420 struct dma_device *dma_dev = chan->device;
421 struct dma_async_tx_descriptor *desc;
422 struct circ_buf *xmit = &uap->port.state->xmit;
423 unsigned int count;
424
425 /*
426 * Try to avoid the overhead involved in using DMA if the
427 * transaction fits in the first half of the FIFO, by using
428 * the standard interrupt handling. This ensures that we
429 * issue a uart_write_wakeup() at the appropriate time.
430 */
431 count = uart_circ_chars_pending(xmit);
432 if (count < (uap->fifosize >> 1)) {
433 uap->dmatx.queued = false;
434 return 0;
435 }
436
437 /*
438 * Bodge: don't send the last character by DMA, as this
439 * will prevent XON from notifying us to restart DMA.
440 */
441 count -= 1;
442
443 /* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
444 if (count > PL011_DMA_BUFFER_SIZE)
445 count = PL011_DMA_BUFFER_SIZE;
446
447 if (xmit->tail < xmit->head)
448 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
449 else {
450 size_t first = UART_XMIT_SIZE - xmit->tail;
451 size_t second = xmit->head;
452
453 memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
454 if (second)
455 memcpy(&dmatx->buf[first], &xmit->buf[0], second);
456 }
457
458 dmatx->sg.length = count;
459
460 if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
461 uap->dmatx.queued = false;
462 dev_dbg(uap->port.dev, "unable to map TX DMA\n");
463 return -EBUSY;
464 }
465
Alexandre Bounine16052822012-03-08 16:11:18 -0500466 desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
Russell King68b65f72010-12-22 17:24:39 +0000467 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
468 if (!desc) {
469 dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
470 uap->dmatx.queued = false;
471 /*
472 * If DMA cannot be used right now, we complete this
473 * transaction via IRQ and let the TTY layer retry.
474 */
475 dev_dbg(uap->port.dev, "TX DMA busy\n");
476 return -EBUSY;
477 }
478
479 /* Some data to go along to the callback */
480 desc->callback = pl011_dma_tx_callback;
481 desc->callback_param = uap;
482
483 /* All errors should happen at prepare time */
484 dmaengine_submit(desc);
485
486 /* Fire the DMA transaction */
487 dma_dev->device_issue_pending(chan);
488
489 uap->dmacr |= UART011_TXDMAE;
490 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
491 uap->dmatx.queued = true;
492
493 /*
494 * Now we know that DMA will fire, so advance the ring buffer
495 * with the stuff we just dispatched.
496 */
497 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
498 uap->port.icount.tx += count;
499
500 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
501 uart_write_wakeup(&uap->port);
502
503 return 1;
504}
505
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 * Otherwise fall back to PIO (TX IRQ stays enabled).
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
		return true;
	}
	return false;
}
543
544/*
545 * Stop the DMA transmit (eg, due to received XOFF).
546 * Locking: called with port lock held and IRQs disabled.
547 */
548static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
549{
550 if (uap->dmatx.queued) {
551 uap->dmacr &= ~UART011_TXDMAE;
552 writew(uap->dmacr, uap->port.membase + UART011_DMACR);
553 }
554}
555
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				ret = true;
			} else {
				/* refill declined/failed: fall back to TX IRQ */
				uap->im |= UART011_TXIM;
				ret = false;
			}
			writew(uap->im, uap->port.membase + UART011_IMSC);
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			/* buffer already queued, just re-enable the DMA */
			uap->dmacr |= UART011_TXDMAE;
			writew(uap->dmacr,
				       uap->port.membase + UART011_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	writew(uap->port.x_char, uap->port.membase + UART01x_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	writew(dmacr, uap->port.membase + UART011_DMACR);

	return true;
}
619
/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	if (!uap->using_tx_dma)
		return;

	/*
	 * Avoid deadlock with the DMA engine callback: terminate_all may
	 * wait for pl011_dma_tx_callback(), which takes the port lock, so
	 * drop it around the call.
	 */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		/* Descriptor was cancelled: unmap it and disable TX DMA */
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	}
}
643
Linus Walleijead76f32011-02-24 13:21:08 +0100644static void pl011_dma_rx_callback(void *data);
645
/*
 * Queue an RX DMA transfer into the currently selected ping-pong
 * buffer, enable RX DMA in the UART and mask the RX interrupt.
 * Returns 0 on success, -EIO if there is no RX channel, or -EBUSY if
 * the engine could not prepare a descriptor (caller falls back to
 * interrupt mode).
 */
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = true;

	/* RX data now arrives via DMA, so mask the RX interrupt */
	uap->im &= ~UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);

	return 0;
}
688
/*
 * This is called when either the DMA job is complete, or
 * the FIFO timeout interrupt occurred.  This must be called
 * with the port spinlock uap->port.lock held.
 *
 * @pending:   number of bytes the DMA engine wrote into the buffer
 * @use_buf_b: which ping-pong buffer holds the data
 * @readfifo:  also drain the hardware FIFO after the DMA data
 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_struct *tty = uap->port.state->port.tty;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	struct device *dev = uap->dmarx.chan->device->dev;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	/* Pick everything from the DMA first */
	if (pending) {
		/* Sync in buffer */
		dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_buf() tries to take as many chars
		 * as it can.
		 */
		dma_count = tty_insert_flip_string(uap->port.state->port.tty,
						   sgbuf->buf, pending);

		/* Return buffer to device */
		dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
		       uap->port.membase + UART011_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out.  Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	/* Drop the lock while pushing into the TTY layer */
	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(tty);
	spin_lock(&uap->port.lock);
}
757
/*
 * RX timeout interrupt while a DMA job is in flight: pause the engine
 * to get a trustworthy residue, hand the received bytes to the TTY
 * layer, then flip buffers and restart DMA (or fall back to interrupt
 * mode if that fails).
 * Locking: called with port lock held and IRQs disabled.
 */
static void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	enum dma_status dmastat;

	/*
	 * Pause the transfer so we can trust the current counter,
	 * do this before we pause the PL011 block, else we may
	 * overflow the FIFO.
	 */
	if (dmaengine_pause(rxchan))
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");
	dmastat = rxchan->device->device_tx_status(rxchan,
						   dmarx->cookie, &state);
	if (dmastat != DMA_PAUSED)
		dev_err(uap->port.dev, "unable to pause DMA transfer\n");

	/* Disable RX DMA - incoming data will wait in the FIFO */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	uap->dmarx.running = false;

	/* residue = bytes the engine did NOT write */
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/*
	 * This will take the chars we have so far and insert
	 * into the framework.
	 */
	pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);

	/* Switch buffer & re-trigger DMA job */
	dmarx->use_buf_b = !dmarx->use_buf_b;
	if (pl011_dma_rx_trigger_dma(uap)) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}
805
/*
 * DMA completion callback: the RX buffer filled completely before any
 * FIFO timeout.  Flip to the other ping-pong buffer, restart DMA, then
 * push the completed buffer's contents into the TTY layer.
 */
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred.  When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler.  So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	/* Hand the just-completed buffer (lastbuf) to the TTY layer */
	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}
853
/*
 * Stop accepting received characters, when we're shutting down or
 * suspending this port.
 * Locking: called with port lock held and IRQs disabled.
 *
 * NOTE(review): any in-flight RX descriptor is apparently left for the
 * shutdown path to terminate - confirm against pl011_dma_shutdown().
 */
static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
	/* FIXME.  Just disable the DMA enable */
	uap->dmacr &= ~UART011_RXDMAE;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
Russell King68b65f72010-12-22 17:24:39 +0000865
/*
 * Set up DMA buffers at port startup.  TX DMA gets a single bounce
 * buffer; RX DMA (if a channel exists) gets the two ping-pong buffers
 * and an initial RX job.  Any failure leaves the affected direction in
 * interrupt (PIO) mode.
 */
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dmatx.chan)
		return;

	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		/* Roll back buffer A so RX state stays consistent */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);

	/*
	 * ST Micro variants has some specific dma burst threshold
	 * compensation.  Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		writew(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
		       uap->port.membase + ST_UART011_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
	}
}
930
/*
 * Tear down DMA for this port: drain the transmitter, disable the DMA
 * enables in hardware, terminate any in-flight dmaengine jobs and free
 * the TX bounce buffer and RX scatter buffers.
 * Called from pl011_shutdown(); counterpart of pl011_dma_startup().
 */
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/*
	 * Disable RX and TX DMA.  NOTE(review): this busy-wait on the
	 * BUSY flag has no timeout — a wedged transmitter would spin
	 * forever here; confirm against newer kernels if backporting.
	 */
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	writew(uap->dmacr, uap->port.membase + UART011_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		uap->using_rx_dma = false;
	}
}
966
Linus Walleijead76f32011-02-24 13:21:08 +0100967static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
968{
969 return uap->using_rx_dma;
970}
971
972static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
973{
974 return uap->using_rx_dma && uap->dmarx.running;
975}
976
977
#else
/*
 * Blank functions if the DMA engine is not available: these stubs keep
 * the callers unconditional.  The bool/int variants report "no DMA"
 * (false / -EIO) so the callers fall back to interrupt-driven I/O.
 */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

/* No flush hook needed without DMA */
#define pl011_dma_flush_buffer	NULL
#endif
1035
/*
 * uart_ops .stop_tx: mask the TX interrupt, then stop any TX DMA.
 * NOTE(review): serial core convention is that this is called with the
 * port lock held — confirm against callers.
 */
static void pl011_stop_tx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im &= ~UART011_TXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	pl011_dma_tx_stop(uap);
}
1044
/*
 * uart_ops .start_tx: try to kick off a TX DMA job; if DMA declines
 * (unavailable or nothing queued for DMA), unmask the TX interrupt so
 * pl011_tx_chars() will feed the FIFO instead.
 */
static void pl011_start_tx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	if (!pl011_dma_tx_start(uap)) {
		uap->im |= UART011_TXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
}
1054
/*
 * uart_ops .stop_rx: mask the receive, receive-timeout and all error
 * (framing/parity/break/overrun) interrupts, then disable RX DMA.
 */
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	writew(uap->im, uap->port.membase + UART011_IMSC);

	pl011_dma_rx_stop(uap);
}
1065
1066static void pl011_enable_ms(struct uart_port *port)
1067{
1068 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1069
1070 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
1071 writew(uap->im, uap->port.membase + UART011_IMSC);
1072}
1073
/*
 * Interrupt-mode receive path: drain the RX FIFO into the TTY layer.
 * Called from pl011_int() with the port lock held; the lock is dropped
 * around tty_flip_buffer_push() and retaken afterwards.
 */
static void pl011_rx_chars(struct uart_amba_port *uap)
{
	struct tty_struct *tty = uap->port.state->port.tty;

	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(tty);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
		} else
			/* DMA job queued: RX interrupt no longer needed */
			uap->im &= ~UART011_RXIM;
		writew(uap->im, uap->port.membase + UART011_IMSC);
	}
	spin_lock(&uap->port.lock);
}
1097
/*
 * Interrupt-mode transmit path: send the pending x_char if any,
 * otherwise hand off to DMA when possible, otherwise refill up to half
 * the TX FIFO from the circular buffer.  Stops TX when the buffer
 * empties or transmission is stopped.  Called from pl011_int() with
 * the port lock held.
 */
static void pl011_tx_chars(struct uart_amba_port *uap)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count;

	/* High-priority XON/XOFF-style character jumps the queue */
	if (uap->port.x_char) {
		writew(uap->port.x_char, uap->port.membase + UART01x_DR);
		uap->port.icount.tx++;
		uap->port.x_char = 0;
		return;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return;

	/* Refill at most half the FIFO — the TX IRQ fires at half-empty */
	count = uap->fifosize >> 1;
	do {
		writew(xmit->buf[xmit->tail], uap->port.membase + UART01x_DR);
		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
		uap->port.icount.tx++;
		if (uart_circ_empty(xmit))
			break;
	} while (--count > 0);

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit))
		pl011_stop_tx(&uap->port);
}
1133
/*
 * Handle a modem-status interrupt: diff the current modem lines in the
 * flag register against the cached old_status, report DCD/DSR/CTS
 * changes to the serial core, and wake anyone blocked in TIOCMIWAIT.
 * Called from pl011_int() with the port lock held.
 */
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
1157
/*
 * Top-level interrupt handler.  Loops on the masked interrupt status,
 * acknowledging and dispatching RX (DMA or PIO), modem-status and TX
 * work, bounded by AMBA_ISR_PASS_LIMIT to avoid being stuck in the
 * handler on a very busy port.
 */
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	int handled = 0;
	unsigned int dummy_read;

	spin_lock_irqsave(&uap->port.lock, flags);

	status = readw(uap->port.membase + UART011_MIS);
	if (status) {
		do {
			if (uap->vendor->cts_event_workaround) {
				/* workaround to make sure that all bits are unlocked.. */
				writew(0x00, uap->port.membase + UART011_ICR);

				/*
				 * WA: introduce 26ns(1 uart clk) delay before W1C;
				 * single apb access will incur 2 pclk(133.12Mhz) delay,
				 * so add 2 dummy reads
				 */
				dummy_read = readw(uap->port.membase + UART011_ICR);
				dummy_read = readw(uap->port.membase + UART011_ICR);
			}

			/*
			 * Ack everything except TX/RX/RX-timeout; those
			 * are cleared by servicing the FIFOs below.
			 */
			writew(status & ~(UART011_TXIS|UART011_RTIS|
					  UART011_RXIS),
			       uap->port.membase + UART011_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap);

			if (pass_counter-- == 0)
				break;

			status = readw(uap->port.membase + UART011_MIS);
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
1212
1213static unsigned int pl01x_tx_empty(struct uart_port *port)
1214{
1215 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1216 unsigned int status = readw(uap->port.membase + UART01x_FR);
1217 return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
1218}
1219
1220static unsigned int pl01x_get_mctrl(struct uart_port *port)
1221{
1222 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1223 unsigned int result = 0;
1224 unsigned int status = readw(uap->port.membase + UART01x_FR);
1225
Jiri Slaby5159f402007-10-18 23:40:31 -07001226#define TIOCMBIT(uartbit, tiocmbit) \
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 if (status & uartbit) \
1228 result |= tiocmbit
1229
Jiri Slaby5159f402007-10-18 23:40:31 -07001230 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
1231 TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1232 TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1233 TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
1234#undef TIOCMBIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 return result;
1236}
1237
1238static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
1239{
1240 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1241 unsigned int cr;
1242
1243 cr = readw(uap->port.membase + UART011_CR);
1244
Jiri Slaby5159f402007-10-18 23:40:31 -07001245#define TIOCMBIT(tiocmbit, uartbit) \
Linus Torvalds1da177e2005-04-16 15:20:36 -07001246 if (mctrl & tiocmbit) \
1247 cr |= uartbit; \
1248 else \
1249 cr &= ~uartbit
1250
Jiri Slaby5159f402007-10-18 23:40:31 -07001251 TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
1252 TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
1253 TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
1254 TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
1255 TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);
Rabin Vincent3b438162010-02-12 06:43:11 +01001256
1257 if (uap->autorts) {
1258 /* We need to disable auto-RTS if we want to turn RTS off */
1259 TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
1260 }
Jiri Slaby5159f402007-10-18 23:40:31 -07001261#undef TIOCMBIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262
1263 writew(cr, uap->port.membase + UART011_CR);
1264}
1265
/*
 * uart_ops .break_ctl: assert (break_state == -1) or deassert the BRK
 * bit in the line-control register, under the port lock.
 */
static void pl011_break_ctl(struct uart_port *port, int break_state)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned long flags;
	unsigned int lcr_h;

	spin_lock_irqsave(&uap->port.lock, flags);
	lcr_h = readw(uap->port.membase + uap->lcrh_tx);
	if (break_state == -1)
		lcr_h |= UART01x_LCRH_BRK;
	else
		lcr_h &= ~UART01x_LCRH_BRK;
	writew(lcr_h, uap->port.membase + uap->lcrh_tx);
	spin_unlock_irqrestore(&uap->port.lock, flags);
}
1281
Jason Wessel84b5ae12008-02-20 13:33:39 -06001282#ifdef CONFIG_CONSOLE_POLL
/*
 * kgdb/console-poll read: non-blocking fetch of one character from the
 * RX FIFO; returns NO_POLL_CHAR when the FIFO is empty.
 */
static int pl010_get_poll_char(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int status;

	status = readw(uap->port.membase + UART01x_FR);
	if (status & UART01x_FR_RXFE)
		return NO_POLL_CHAR;

	return readw(uap->port.membase + UART01x_DR);
}
1294
/*
 * kgdb/console-poll write: busy-wait until the TX FIFO has room, then
 * write one character.  Polling context, so no interrupts or locks.
 */
static void pl010_put_poll_char(struct uart_port *port,
			 unsigned char ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();

	writew(ch, uap->port.membase + UART01x_DR);
}
1305
1306#endif /* CONFIG_CONSOLE_POLL */
1307
/*
 * uart_ops .startup: bring the port up.  Prepares/enables the clock,
 * clears stale interrupts, requests the IRQ, provokes a TX FIFO
 * interrupt via loopback, restores the RTS/DTR state saved at
 * shutdown, starts DMA, and finally unmasks the receive interrupts.
 * Returns 0 on success or a negative errno, unwinding the clock on
 * failure via the goto ladder.
 */
static int pl011_startup(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;
	int retval;

	retval = clk_prepare(uap->clk);
	if (retval)
		goto out;

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_enable(uap->clk);
	if (retval)
		goto clk_unprep;

	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
	       UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);

	/*
	 * Allocate the IRQ
	 */
	retval = request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
	if (retval)
		goto clk_dis;

	writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);

	/*
	 * Provoke TX FIFO interrupt into asserting.
	 */
	cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
	writew(cr, uap->port.membase + UART011_CR);
	writew(0, uap->port.membase + UART011_FBRD);
	writew(1, uap->port.membase + UART011_IBRD);
	writew(0, uap->port.membase + uap->lcrh_rx);
	if (uap->lcrh_tx != uap->lcrh_rx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(0, uap->port.membase + uap->lcrh_tx);
	}
	writew(0, uap->port.membase + UART01x_DR);
	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
		barrier();

	/* restore RTS and DTR */
	cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
	cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	/*
	 * initialise the old status of the modem signals
	 */
	uap->old_status = readw(uap->port.membase + UART01x_FR) & UART01x_FR_MODEM_ANY;

	/* Startup DMA */
	pl011_dma_startup(uap);

	/*
	 * Finally, enable interrupts, only timeouts when using DMA
	 * if initial RX DMA job failed, start in interrupt mode
	 * as well.
	 */
	spin_lock_irq(&uap->port.lock);
	/* Clear out any spuriously appearing RX interrupts */
	writew(UART011_RTIS | UART011_RXIS,
	       uap->port.membase + UART011_ICR);
	uap->im = UART011_RTIM;
	if (!pl011_dma_rx_running(uap))
		uap->im |= UART011_RXIM;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	spin_unlock_irq(&uap->port.lock);

	/* Board-specific init hook supplied through platform data */
	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->init)
			plat->init();
	}

	return 0;

 clk_dis:
	clk_disable(uap->clk);
 clk_unprep:
	clk_unprepare(uap->clk);
 out:
	return retval;
}
1407
Linus Walleijec489aa2010-06-02 08:13:52 +01001408static void pl011_shutdown_channel(struct uart_amba_port *uap,
1409 unsigned int lcrh)
1410{
1411 unsigned long val;
1412
1413 val = readw(uap->port.membase + lcrh);
1414 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
1415 writew(val, uap->port.membase + lcrh);
1416}
1417
/*
 * uart_ops .shutdown: reverse of pl011_startup().  Masks and clears
 * all interrupts, tears down DMA, frees the IRQ, quiesces the UART
 * while preserving RTS/DTR in old_cr for the next startup, disables
 * break/FIFOs, stops the clock and runs the platform exit hook.
 */
static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int cr;

	/*
	 * disable all interrupts
	 */
	spin_lock_irq(&uap->port.lock);
	uap->im = 0;
	writew(uap->im, uap->port.membase + UART011_IMSC);
	writew(0xffff, uap->port.membase + UART011_ICR);
	spin_unlock_irq(&uap->port.lock);

	pl011_dma_shutdown(uap);

	/*
	 * Free the interrupt
	 */
	free_irq(uap->port.irq, uap);

	/*
	 * disable the port. It should not disable RTS and DTR.
	 * Also RTS and DTR state should be preserved to restore
	 * it during startup().
	 */
	uap->autorts = false;
	cr = readw(uap->port.membase + UART011_CR);
	uap->old_cr = cr;
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(cr, uap->port.membase + UART011_CR);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx)
		pl011_shutdown_channel(uap, uap->lcrh_tx);

	/*
	 * Shut down the clock producer
	 */
	clk_disable(uap->clk);
	clk_unprepare(uap->clk);

	/* Board-specific exit hook supplied through platform data */
	if (uap->port.dev->platform_data) {
		struct amba_pl011_data *plat;

		plat = uap->port.dev->platform_data;
		if (plat->exit)
			plat->exit();
	}

}
1474
/*
 * uart_ops .set_termios: program word length, stop bits, parity, FIFO
 * enable, baud divisors and flow control from the requested termios.
 * The hardware requires IBRD/FBRD to be written before LCRH, and the
 * UART is fully disabled (CR = 0) around the reprogramming.
 */
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	/* ST variants can oversample by 8 instead of 16, doubling max baud */
	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);

	/* Divisor is in 64ths: x8 when oversampling by 8, else x4 */
	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
	}
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	port->read_status_mask = UART011_DR_OE | 255;
	if (termios->c_iflag & INPCK)
		port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & (BRKINT | PARMRK))
		port->read_status_mask |= UART011_DR_BE;

	/*
	 * Characters to ignore
	 */
	port->ignore_status_mask = 0;
	if (termios->c_iflag & IGNPAR)
		port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
	if (termios->c_iflag & IGNBRK) {
		port->ignore_status_mask |= UART011_DR_BE;
		/*
		 * If we're ignoring parity and break indicators,
		 * ignore overruns too (for real raw support).
		 */
		if (termios->c_iflag & IGNPAR)
			port->ignore_status_mask |= UART011_DR_OE;
	}

	/*
	 * Ignore all characters if CREAD is not set.
	 */
	if ((termios->c_cflag & CREAD) == 0)
		port->ignore_status_mask |= UART_DUMMY_DR_RX;

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = readw(port->membase + UART011_CR);
	writew(0, port->membase + UART011_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/* Set baud rate */
	writew(quot & 0x3f, port->membase + UART011_FBRD);
	writew(quot >> 6, port->membase + UART011_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
	 * ----------^----------^----------^----------^-----
	 */
	writew(lcr_h, port->membase + uap->lcrh_rx);
	if (uap->lcrh_rx != uap->lcrh_tx) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			writew(0xff, uap->port.membase + UART011_MIS);
		writew(lcr_h, port->membase + uap->lcrh_tx);
	}
	writew(old_cr, port->membase + UART011_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}
1608
1609static const char *pl011_type(struct uart_port *port)
1610{
Russell Kinge8a7ba82010-12-28 09:16:54 +00001611 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1612 return uap->port.type == PORT_AMBA ? uap->type : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613}
1614
/*
 * Release the memory region(s) being used by 'port'
 * (the 4K register window claimed in pl010_request_port()).
 */
static void pl010_release_port(struct uart_port *port)
{
	release_mem_region(port->mapbase, SZ_4K);
}
1622
1623/*
1624 * Request the memory region(s) being used by 'port'
1625 */
1626static int pl010_request_port(struct uart_port *port)
1627{
1628 return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
1629 != NULL ? 0 : -EBUSY;
1630}
1631
1632/*
1633 * Configure/autoconfigure the port.
1634 */
1635static void pl010_config_port(struct uart_port *port, int flags)
1636{
1637 if (flags & UART_CONFIG_TYPE) {
1638 port->type = PORT_AMBA;
1639 pl010_request_port(port);
1640 }
1641}
1642
1643/*
1644 * verify the new serial_struct (for TIOCSSERIAL).
1645 */
1646static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
1647{
1648 int ret = 0;
1649 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
1650 ret = -EINVAL;
Yinghai Lua62c4132008-08-19 20:49:55 -07001651 if (ser->irq < 0 || ser->irq >= nr_irqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 ret = -EINVAL;
1653 if (ser->baud_base < 9600)
1654 ret = -EINVAL;
1655 return ret;
1656}
1657
/* Operations table hooking this driver into the serial core. */
static struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl01x_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl01x_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl010_release_port,
	.request_port	= pl010_request_port,
	.config_port	= pl010_config_port,
	.verify_port	= pl010_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_get_char = pl010_get_poll_char,
	.poll_put_char = pl010_put_poll_char,
#endif
};
1681
/* Registered ports, indexed by line number (also the console index). */
static struct uart_amba_port *amba_ports[UART_NR];
1683
1684#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
1685
/*
 * Console character output: busy-wait until the TX FIFO has room, then
 * write one character.  Used via uart_console_write().
 */
static void pl011_console_putchar(struct uart_port *port, int ch)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)port;

	while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
		barrier();
	writew(ch, uap->port.membase + UART01x_DR);
}
1694
/*
 * Console .write: emit 'count' bytes on the console port.  Takes the
 * port lock unless we're in sysrq or an oops (trylock then, so a
 * dying CPU holding the lock cannot deadlock the panic output).
 * Temporarily forces the UART/TX enables on, then drains the
 * transmitter and restores the original control register.
 */
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int status, old_cr, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	old_cr = readw(uap->port.membase + UART011_CR);
	new_cr = old_cr & ~UART011_CR_CTSEN;
	new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	writew(new_cr, uap->port.membase + UART011_CR);

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the TCR
	 */
	do {
		status = readw(uap->port.membase + UART01x_FR);
	} while (status & UART01x_FR_BUSY);
	writew(old_cr, uap->port.membase + UART011_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
1738
/*
 * Recover baud/parity/word-length settings already programmed into the
 * UART (e.g. by the boot loader) so the console can keep them when no
 * options string is given.  Only reads back if the UART is enabled;
 * otherwise the caller's defaults are left untouched.
 */
static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (readw(uap->port.membase + UART011_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = readw(uap->port.membase + uap->lcrh_tx);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		/* Only 7- and 8-bit words are reported (0x60 = WLEN field) */
		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = readw(uap->port.membase + UART011_IBRD);
		fbrd = readw(uap->port.membase + UART011_FBRD);

		/* Divisor is IBRD + FBRD/64, at 16x oversampling */
		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			if (readw(uap->port.membase + UART011_CR)
				  & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
1773
1774static int __init pl011_console_setup(struct console *co, char *options)
1775{
1776 struct uart_amba_port *uap;
1777 int baud = 38400;
1778 int bits = 8;
1779 int parity = 'n';
1780 int flow = 'n';
Russell King4b4851c2011-09-22 11:35:30 +01001781 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782
1783 /*
1784 * Check whether an invalid uart number has been specified, and
1785 * if so, search for the first available port that does have
1786 * console support.
1787 */
1788 if (co->index >= UART_NR)
1789 co->index = 0;
1790 uap = amba_ports[co->index];
Russell Kingd28122a2007-01-22 18:59:42 +00001791 if (!uap)
1792 return -ENODEV;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793
Russell King4b4851c2011-09-22 11:35:30 +01001794 ret = clk_prepare(uap->clk);
1795 if (ret)
1796 return ret;
1797
Shreshtha Kumar Sahuc16d51a2011-06-13 10:11:33 +02001798 if (uap->port.dev->platform_data) {
1799 struct amba_pl011_data *plat;
1800
1801 plat = uap->port.dev->platform_data;
1802 if (plat->init)
1803 plat->init();
1804 }
1805
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 uap->port.uartclk = clk_get_rate(uap->clk);
1807
1808 if (options)
1809 uart_parse_options(options, &baud, &parity, &bits, &flow);
1810 else
1811 pl011_console_get_options(uap, &baud, &parity, &bits);
1812
1813 return uart_set_options(&uap->port, co, baud, parity, bits, flow);
1814}
1815
/* Forward declaration: the console's .data must point at the uart_driver */
static struct uart_driver amba_reg;

/* Console device for "console=ttyAMAn"; registered via AMBA_CONSOLE below */
static struct console amba_console = {
	.name		= "ttyAMA",
	.write		= pl011_console_write,
	.device		= uart_console_device,
	.setup		= pl011_console_setup,
	.flags		= CON_PRINTBUFFER,	/* replay the log buffer on registration */
	.index		= -1,			/* no fixed port; chosen at setup time */
	.data		= &amba_reg,
};
1826
#define AMBA_CONSOLE	(&amba_console)
#else	/* !CONFIG_SERIAL_AMBA_PL011_CONSOLE */
#define AMBA_CONSOLE	NULL	/* console support compiled out */
#endif
1831
/* The tty driver shared by all PL011 ports: /dev/ttyAMA0..ttyAMA(UART_NR-1) */
static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,	/* NULL when console support is off */
};
1841
Russell Kingaa25afa2011-02-19 15:55:00 +00001842static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001843{
1844 struct uart_amba_port *uap;
Alessandro Rubini5926a292009-06-04 17:43:04 +01001845 struct vendor_data *vendor = id->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 void __iomem *base;
1847 int i, ret;
1848
1849 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
1850 if (amba_ports[i] == NULL)
1851 break;
1852
1853 if (i == ARRAY_SIZE(amba_ports)) {
1854 ret = -EBUSY;
1855 goto out;
1856 }
1857
Yoann Padioleaudd00cc42007-07-19 01:49:03 -07001858 uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 if (uap == NULL) {
1860 ret = -ENOMEM;
1861 goto out;
1862 }
1863
Linus Walleijdc890c22009-06-07 23:27:31 +01001864 base = ioremap(dev->res.start, resource_size(&dev->res));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865 if (!base) {
1866 ret = -ENOMEM;
1867 goto free;
1868 }
1869
Russell Kingee569c42008-11-30 17:38:14 +00001870 uap->clk = clk_get(&dev->dev, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871 if (IS_ERR(uap->clk)) {
1872 ret = PTR_ERR(uap->clk);
1873 goto unmap;
1874 }
1875
Linus Walleij9b96fba2012-03-13 13:27:23 +01001876 /* Ensure interrupts from this UART are masked and cleared */
1877 writew(0, uap->port.membase + UART011_IMSC);
1878 writew(0xffff, uap->port.membase + UART011_ICR);
1879
Russell Kingc19f12b2010-12-22 17:48:26 +00001880 uap->vendor = vendor;
Linus Walleijec489aa2010-06-02 08:13:52 +01001881 uap->lcrh_rx = vendor->lcrh_rx;
1882 uap->lcrh_tx = vendor->lcrh_tx;
Shreshtha Kumar Sahud8d8ffa2012-01-18 15:53:59 +05301883 uap->old_cr = 0;
Russell Kingffca2b12010-12-22 17:13:05 +00001884 uap->fifosize = vendor->fifosize;
Shreshtha Kumar Sahuc16d51a2011-06-13 10:11:33 +02001885 uap->interrupt_may_hang = vendor->interrupt_may_hang;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 uap->port.dev = &dev->dev;
1887 uap->port.mapbase = dev->res.start;
1888 uap->port.membase = base;
1889 uap->port.iotype = UPIO_MEM;
1890 uap->port.irq = dev->irq[0];
Russell Kingffca2b12010-12-22 17:13:05 +00001891 uap->port.fifosize = uap->fifosize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 uap->port.ops = &amba_pl011_pops;
1893 uap->port.flags = UPF_BOOT_AUTOCONF;
1894 uap->port.line = i;
Russell King68b65f72010-12-22 17:24:39 +00001895 pl011_dma_probe(uap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001896
Russell Kinge8a7ba82010-12-28 09:16:54 +00001897 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
1898
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899 amba_ports[i] = uap;
1900
1901 amba_set_drvdata(dev, uap);
1902 ret = uart_add_one_port(&amba_reg, &uap->port);
1903 if (ret) {
1904 amba_set_drvdata(dev, NULL);
1905 amba_ports[i] = NULL;
Russell King68b65f72010-12-22 17:24:39 +00001906 pl011_dma_remove(uap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 clk_put(uap->clk);
1908 unmap:
1909 iounmap(base);
1910 free:
1911 kfree(uap);
1912 }
1913 out:
1914 return ret;
1915}
1916
1917static int pl011_remove(struct amba_device *dev)
1918{
1919 struct uart_amba_port *uap = amba_get_drvdata(dev);
1920 int i;
1921
1922 amba_set_drvdata(dev, NULL);
1923
1924 uart_remove_one_port(&amba_reg, &uap->port);
1925
1926 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
1927 if (amba_ports[i] == uap)
1928 amba_ports[i] = NULL;
1929
Russell King68b65f72010-12-22 17:24:39 +00001930 pl011_dma_remove(uap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 iounmap(uap->port.membase);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 clk_put(uap->clk);
1933 kfree(uap);
1934 return 0;
1935}
1936
Leo Chenb736b892009-07-28 23:43:33 +01001937#ifdef CONFIG_PM
1938static int pl011_suspend(struct amba_device *dev, pm_message_t state)
1939{
1940 struct uart_amba_port *uap = amba_get_drvdata(dev);
1941
1942 if (!uap)
1943 return -EINVAL;
1944
1945 return uart_suspend_port(&amba_reg, &uap->port);
1946}
1947
1948static int pl011_resume(struct amba_device *dev)
1949{
1950 struct uart_amba_port *uap = amba_get_drvdata(dev);
1951
1952 if (!uap)
1953 return -EINVAL;
1954
1955 return uart_resume_port(&amba_reg, &uap->port);
1956}
1957#endif
1958
/*
 * AMBA/PrimeCell ID match table: maps each supported peripheral ID to
 * the vendor_data describing its register quirks.
 */
static struct amba_id pl011_ids[] = {
	{
		/* ARM PL011 */
		.id	= 0x00041011,
		.mask	= 0x000fffff,
		.data	= &vendor_arm,
	},
	{
		/* ST-Ericsson derivative (uses vendor_st quirks) */
		.id	= 0x00380802,
		.mask	= 0x00ffffff,
		.data	= &vendor_st,
	},
	{ 0, 0 },	/* sentinel */
};
1972
/* Export the ID table so module autoloading can match PL011 peripherals */
MODULE_DEVICE_TABLE(amba, pl011_ids);
1974
/* AMBA bus glue: binds the probe/remove (and PM) callbacks to pl011_ids */
static struct amba_driver pl011_driver = {
	.drv = {
		.name	= "uart-pl011",
	},
	.id_table	= pl011_ids,
	.probe		= pl011_probe,
	.remove		= pl011_remove,
#ifdef CONFIG_PM
	.suspend	= pl011_suspend,
	.resume		= pl011_resume,
#endif
};
1987
1988static int __init pl011_init(void)
1989{
1990 int ret;
1991 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
1992
1993 ret = uart_register_driver(&amba_reg);
1994 if (ret == 0) {
1995 ret = amba_driver_register(&pl011_driver);
1996 if (ret)
1997 uart_unregister_driver(&amba_reg);
1998 }
1999 return ret;
2000}
2001
/* Module unload: unregister in the reverse order of pl011_init() */
static void __exit pl011_exit(void)
{
	amba_driver_unregister(&pl011_driver);
	uart_unregister_driver(&amba_reg);
}
2007
/*
 * While this can be a module, if built-in it's most likely the console,
 * so register early: keep module_exit, but use arch_initcall in place
 * of the usual module_init.
 */
arch_initcall(pl011_init);
module_exit(pl011_exit);

MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
MODULE_DESCRIPTION("ARM AMBA serial port driver");
MODULE_LICENSE("GPL");