blob: 88ff97e0d6b06ffe0ef70a05327fcf3f13c2f180 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * Driver for AMBA serial ports
3 *
4 * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
5 *
6 * Copyright 1999 ARM Limited
7 * Copyright (C) 2000 Deep Blue Solutions Ltd.
Russell King68b65f72010-12-22 17:24:39 +00008 * Copyright (C) 2010 ST-Ericsson SA
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 * This is a generic driver for ARM AMBA-type serial ports. They
25 * have a lot of 16550-like features, but are not register compatible.
26 * Note that although they do have CTS, DCD and DSR inputs, they do
27 * not have an RI input, nor do they have DTR or RTS outputs. If
28 * required, these have to be supplied via some other means (eg, GPIO)
29 * and hooked into this driver.
30 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
Chanho Mincb06ff12013-03-27 18:38:11 +090032
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
34#define SUPPORT_SYSRQ
35#endif
36
37#include <linux/module.h>
38#include <linux/ioport.h>
39#include <linux/init.h>
40#include <linux/console.h>
41#include <linux/sysrq.h>
42#include <linux/device.h>
43#include <linux/tty.h>
44#include <linux/tty_flip.h>
45#include <linux/serial_core.h>
46#include <linux/serial.h>
Russell Kinga62c80e2006-01-07 13:52:45 +000047#include <linux/amba/bus.h>
48#include <linux/amba/serial.h>
Russell Kingf8ce2542006-01-07 16:15:52 +000049#include <linux/clk.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090050#include <linux/slab.h>
Russell King68b65f72010-12-22 17:24:39 +000051#include <linux/dmaengine.h>
52#include <linux/dma-mapping.h>
53#include <linux/scatterlist.h>
Shreshtha Kumar Sahuc16d51a2011-06-13 10:11:33 +020054#include <linux/delay.h>
Viresh Kumar258aea72012-02-01 16:12:19 +053055#include <linux/types.h>
Matthew Leach32614aa2012-08-28 16:41:28 +010056#include <linux/of.h>
57#include <linux/of_device.h>
Shawn Guo258e0552012-05-06 22:53:35 +080058#include <linux/pinctrl/consumer.h>
Alessandro Rubinicb707062012-06-24 12:46:37 +010059#include <linux/sizes.h>
Linus Walleijde609582012-10-15 13:36:01 +020060#include <linux/io.h>
Graeme Gregory3db9ab02015-05-21 17:26:24 +010061#include <linux/acpi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Russell King9f25bc52015-11-03 14:51:13 +000063#include "amba-pl011.h"
64
/* Maximum number of PL011 ports this driver supports */
#define UART_NR			14

/* Character device major/minor numbers for the ttyAMA nodes */
#define SERIAL_AMBA_MAJOR	204
#define SERIAL_AMBA_MINOR	64
#define SERIAL_AMBA_NR		UART_NR

/* Give up after this many loop iterations in the ISR to avoid lockup */
#define AMBA_ISR_PASS_LIMIT	256

/* All receive-error bits reported in the upper byte of the DR register */
#define UART_DR_ERROR		(UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
/* Software-only flag ORed into a received char to mark it as valid data */
#define UART_DUMMY_DR_RX	(1 << 16)
Linus Torvalds1da177e2005-04-16 15:20:36 -070075
/*
 * Register offsets for the standard ARM PL011 layout, indexed by the
 * driver's logical REG_* names (see amba-pl011.h).  Note that on this
 * variant REG_LCRH_RX and REG_LCRH_TX map to the same UART011_LCRH
 * register; the ST variant (below) splits them.
 */
static u16 pl011_std_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = UART011_LCRH,
	[REG_LCRH_TX] = UART011_LCRH,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};
102
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
	/* Logical-to-physical register offset table for this vendor */
	const u16		*reg_offset;
	/* Value written to REG_IFLS: RX/TX FIFO interrupt trigger levels */
	unsigned int		ifls;
	/* True if the part supports selectable oversampling rates */
	bool			oversampling;
	/* True if the part has the ST DMA burst threshold (REG_ST_DMAWM) */
	bool			dma_threshold;
	/* True if the part needs the CTS event interrupt workaround */
	bool			cts_event_workaround;
	/* True if the UART is pre-enabled by firmware and must stay enabled */
	bool			always_enabled;
	/* True if the line settings are fixed by firmware (ignore termios) */
	bool			fixed_options;

	/* Returns this device's FIFO depth in bytes */
	unsigned int (*get_fifosize)(struct amba_device *dev);
};
115
/*
 * FIFO depth for genuine ARM PL011 implementations: revisions before
 * r3 have 16-byte FIFOs, r3 and later have 32-byte FIFOs.
 */
static unsigned int get_fifosize_arm(struct amba_device *dev)
{
	if (amba_rev(dev) < 3)
		return 16;

	return 32;
}
120
/* Vendor data for genuine ARM PL011 parts: standard register layout,
 * half-full FIFO trigger levels, none of the ST extras. */
static struct vendor_data vendor_arm = {
	.reg_offset		= pl011_std_offsets,
	.ifls			= UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_arm,
};
131
/*
 * Vendor data for the ARM SBSA Generic UART: the UART is set up and
 * enabled by firmware, so the driver must not reprogram the line
 * settings (fixed_options) or disable the port (always_enabled).
 * No .ifls or .get_fifosize — presumably unused on this path; confirm
 * against the SBSA setup code elsewhere in the file.
 */
static struct vendor_data vendor_sbsa = {
	.reg_offset		= pl011_std_offsets,
	.oversampling		= false,
	.dma_threshold		= false,
	.cts_event_workaround	= false,
	.always_enabled		= true,
	.fixed_options		= true,
};
140
/*
 * Register offsets for the ST-Ericsson variant.  Identical to the
 * standard layout except that the line control register is split into
 * separate RX and TX instances (ST_UART011_LCRH_RX/TX).
 */
static u16 pl011_st_offsets[REG_ARRAY_SIZE] = {
	[REG_DR] = UART01x_DR,
	[REG_ST_DMAWM] = ST_UART011_DMAWM,
	[REG_ST_TIMEOUT] = ST_UART011_TIMEOUT,
	[REG_FR] = UART01x_FR,
	[REG_LCRH_RX] = ST_UART011_LCRH_RX,
	[REG_LCRH_TX] = ST_UART011_LCRH_TX,
	[REG_IBRD] = UART011_IBRD,
	[REG_FBRD] = UART011_FBRD,
	[REG_CR] = UART011_CR,
	[REG_IFLS] = UART011_IFLS,
	[REG_IMSC] = UART011_IMSC,
	[REG_RIS] = UART011_RIS,
	[REG_MIS] = UART011_MIS,
	[REG_ICR] = UART011_ICR,
	[REG_DMACR] = UART011_DMACR,
	[REG_ST_XFCR] = ST_UART011_XFCR,
	[REG_ST_XON1] = ST_UART011_XON1,
	[REG_ST_XON2] = ST_UART011_XON2,
	[REG_ST_XOFF1] = ST_UART011_XOFF1,
	[REG_ST_XOFF2] = ST_UART011_XOFF2,
	[REG_ST_ITCR] = ST_UART011_ITCR,
	[REG_ST_ITIP] = ST_UART011_ITIP,
	[REG_ST_ABCR] = ST_UART011_ABCR,
	[REG_ST_ABIMSC] = ST_UART011_ABIMSC,
};
167
/* ST variants have a 64-byte FIFO regardless of peripheral revision. */
static unsigned int get_fifosize_st(struct amba_device *dev)
{
	const unsigned int st_fifo_depth = 64;

	return st_fifo_depth;
}
172
/* Vendor data for ST-Ericsson parts: split LCRH register layout,
 * half-FIFO trigger levels, and all the ST-specific quirks enabled. */
static struct vendor_data vendor_st = {
	.reg_offset		= pl011_st_offsets,
	.ifls			= UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
	.oversampling		= true,
	.dma_threshold		= true,
	.cts_event_workaround	= true,
	.always_enabled		= false,
	.fixed_options		= false,
	.get_fifosize		= get_fifosize_st,
};
183
Russell King68b65f72010-12-22 17:24:39 +0000184/* Deals with DMA transactions */
Linus Walleijead76f32011-02-24 13:21:08 +0100185
/* A single DMA bounce buffer plus the scatterlist entry describing it */
struct pl011_sgbuf {
	struct scatterlist sg;	/* one-entry scatterlist for the buffer */
	char *buf;		/* CPU virtual address of the DMA buffer */
};
190
/* Per-port RX DMA state */
struct pl011_dmarx_data {
	struct dma_chan		*chan;		/* RX DMA channel, NULL if unused */
	struct completion	complete;	/* completion of the current transfer */
	bool			use_buf_b;	/* which double buffer is active */
	struct pl011_sgbuf	sgbuf_a;	/* double buffer A */
	struct pl011_sgbuf	sgbuf_b;	/* double buffer B */
	dma_cookie_t		cookie;		/* cookie of the in-flight descriptor */
	bool			running;	/* RX DMA currently in progress */
	struct timer_list	timer;		/* NOTE(review): appears to drive residue
						 * polling for idle lines — confirm against
						 * pl011_dma_rx_poll (not in this chunk) */
	unsigned int last_residue;	/* residue seen at the previous poll */
	unsigned long last_jiffies;	/* timestamp of last observed RX progress */
	bool auto_poll_rate;		/* poll rate derived automatically (from baud) */
	unsigned int poll_rate;		/* polling interval, in ms */
	unsigned int poll_timeout;	/* stop polling after this long with no data, ms */
};
206
Russell King68b65f72010-12-22 17:24:39 +0000207struct pl011_dmatx_data {
208 struct dma_chan *chan;
209 struct scatterlist sg;
210 char *buf;
211 bool queued;
212};
213
/*
 * We wrap our port structure around the generic uart_port.
 */
struct uart_amba_port {
	struct uart_port	port;
	const u16		*reg_offset;	/* cached vendor->reg_offset table */
	struct clk		*clk;		/* bus/baud reference clock */
	const struct vendor_data *vendor;	/* vendor quirks for this part */
	unsigned int		dmacr;		/* dma control reg (shadow of REG_DMACR) */
	unsigned int		im;		/* interrupt mask (shadow of REG_IMSC) */
	unsigned int		old_status;	/* last seen modem status bits */
	unsigned int		fifosize;	/* vendor-specific */
	unsigned int		old_cr;		/* state during shutdown */
	bool			autorts;	/* hardware RTS flow control enabled */
	unsigned int		fixed_baud;	/* vendor-set fixed baud rate */
	char			type[12];	/* port type string for uart core */
#ifdef CONFIG_DMA_ENGINE
	/* DMA stuff */
	bool			using_tx_dma;	/* TX DMA is set up and in use */
	bool			using_rx_dma;	/* RX DMA is set up and in use */
	struct pl011_dmarx_data dmarx;
	struct pl011_dmatx_data	dmatx;
	bool			dma_probed;	/* pl011_dma_probe() has completed */
#endif
};
239
Russell King9f25bc52015-11-03 14:51:13 +0000240static unsigned int pl011_reg_to_offset(const struct uart_amba_port *uap,
241 unsigned int reg)
242{
Russell Kingdebb7f62015-11-16 17:40:26 +0000243 return uap->reg_offset[reg];
Russell King9f25bc52015-11-03 14:51:13 +0000244}
245
Russell Kingb2a4e242015-11-03 14:51:03 +0000246static unsigned int pl011_read(const struct uart_amba_port *uap,
247 unsigned int reg)
Russell King75836332015-11-03 14:50:58 +0000248{
Russell King9f25bc52015-11-03 14:51:13 +0000249 return readw(uap->port.membase + pl011_reg_to_offset(uap, reg));
Russell King75836332015-11-03 14:50:58 +0000250}
251
Russell Kingb2a4e242015-11-03 14:51:03 +0000252static void pl011_write(unsigned int val, const struct uart_amba_port *uap,
253 unsigned int reg)
Russell King75836332015-11-03 14:50:58 +0000254{
Russell King9f25bc52015-11-03 14:51:13 +0000255 writew(val, uap->port.membase + pl011_reg_to_offset(uap, reg));
Russell King75836332015-11-03 14:50:58 +0000256}
257
/*
 * Reads up to 256 characters from the FIFO or until it's empty and
 * inserts them into the TTY layer. Returns the number of characters
 * read from the FIFO.
 */
static int pl011_fifo_to_tty(struct uart_amba_port *uap)
{
	u16 status;
	unsigned int ch, flag, max_count = 256;
	int fifotaken = 0;

	while (max_count--) {
		status = pl011_read(uap, REG_FR);
		if (status & UART01x_FR_RXFE)
			break;

		/* Take chars from the FIFO and update status */
		/* UART_DUMMY_DR_RX marks the char as valid RX data */
		ch = pl011_read(uap, REG_DR) | UART_DUMMY_DR_RX;
		flag = TTY_NORMAL;
		uap->port.icount.rx++;
		fifotaken++;

		if (unlikely(ch & UART_DR_ERROR)) {
			if (ch & UART011_DR_BE) {
				/* Frame/parity bits are meaningless during break */
				ch &= ~(UART011_DR_FE | UART011_DR_PE);
				uap->port.icount.brk++;
				if (uart_handle_break(&uap->port))
					continue;
			} else if (ch & UART011_DR_PE)
				uap->port.icount.parity++;
			else if (ch & UART011_DR_FE)
				uap->port.icount.frame++;
			if (ch & UART011_DR_OE)
				uap->port.icount.overrun++;

			/* Drop error bits the user didn't ask to see */
			ch &= uap->port.read_status_mask;

			if (ch & UART011_DR_BE)
				flag = TTY_BREAK;
			else if (ch & UART011_DR_PE)
				flag = TTY_PARITY;
			else if (ch & UART011_DR_FE)
				flag = TTY_FRAME;
		}

		/* Give sysrq first refusal on the character */
		if (uart_handle_sysrq_char(&uap->port, ch & 255))
			continue;

		uart_insert_char(&uap->port, ch, UART011_DR_OE, ch, flag);
	}

	return fifotaken;
}
311
312
313/*
Russell King68b65f72010-12-22 17:24:39 +0000314 * All the DMA operation mode stuff goes inside this ifdef.
315 * This assumes that you have a generic DMA device interface,
316 * no custom DMA interfaces are supported.
317 */
318#ifdef CONFIG_DMA_ENGINE
319
320#define PL011_DMA_BUFFER_SIZE PAGE_SIZE
321
Linus Walleijead76f32011-02-24 13:21:08 +0100322static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
323 enum dma_data_direction dir)
324{
Chanho Mincb06ff12013-03-27 18:38:11 +0900325 dma_addr_t dma_addr;
326
327 sg->buf = dma_alloc_coherent(chan->device->dev,
328 PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
Linus Walleijead76f32011-02-24 13:21:08 +0100329 if (!sg->buf)
330 return -ENOMEM;
331
Chanho Mincb06ff12013-03-27 18:38:11 +0900332 sg_init_table(&sg->sg, 1);
333 sg_set_page(&sg->sg, phys_to_page(dma_addr),
334 PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
335 sg_dma_address(&sg->sg) = dma_addr;
Andrew Jacksonc64be922014-11-07 14:14:43 +0000336 sg_dma_len(&sg->sg) = PL011_DMA_BUFFER_SIZE;
Linus Walleijead76f32011-02-24 13:21:08 +0100337
Linus Walleijead76f32011-02-24 13:21:08 +0100338 return 0;
339}
340
341static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
342 enum dma_data_direction dir)
343{
344 if (sg->buf) {
Chanho Mincb06ff12013-03-27 18:38:11 +0900345 dma_free_coherent(chan->device->dev,
346 PL011_DMA_BUFFER_SIZE, sg->buf,
347 sg_dma_address(&sg->sg));
Linus Walleijead76f32011-02-24 13:21:08 +0100348 }
349}
350
Jorge Ramirez-Ortiz1c9be312015-03-06 13:05:40 -0500351static void pl011_dma_probe(struct uart_amba_port *uap)
Russell King68b65f72010-12-22 17:24:39 +0000352{
353 /* DMA is the sole user of the platform data right now */
Jingoo Han574de552013-07-30 17:06:57 +0900354 struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
Jorge Ramirez-Ortiz1c9be312015-03-06 13:05:40 -0500355 struct device *dev = uap->port.dev;
Russell King68b65f72010-12-22 17:24:39 +0000356 struct dma_slave_config tx_conf = {
Russell King9f25bc52015-11-03 14:51:13 +0000357 .dst_addr = uap->port.mapbase +
358 pl011_reg_to_offset(uap, REG_DR),
Russell King68b65f72010-12-22 17:24:39 +0000359 .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
Vinod Koula485df42011-10-14 10:47:38 +0530360 .direction = DMA_MEM_TO_DEV,
Russell King68b65f72010-12-22 17:24:39 +0000361 .dst_maxburst = uap->fifosize >> 1,
Viresh Kumar258aea72012-02-01 16:12:19 +0530362 .device_fc = false,
Russell King68b65f72010-12-22 17:24:39 +0000363 };
364 struct dma_chan *chan;
365 dma_cap_mask_t mask;
366
Jorge Ramirez-Ortiz1c9be312015-03-06 13:05:40 -0500367 uap->dma_probed = true;
368 chan = dma_request_slave_channel_reason(dev, "tx");
369 if (IS_ERR(chan)) {
370 if (PTR_ERR(chan) == -EPROBE_DEFER) {
Jorge Ramirez-Ortiz1c9be312015-03-06 13:05:40 -0500371 uap->dma_probed = false;
372 return;
373 }
Russell King68b65f72010-12-22 17:24:39 +0000374
Arnd Bergmann787b0c12013-01-28 16:24:37 +0000375 /* We need platform data */
376 if (!plat || !plat->dma_filter) {
377 dev_info(uap->port.dev, "no DMA platform data\n");
378 return;
379 }
380
381 /* Try to acquire a generic DMA engine slave TX channel */
382 dma_cap_zero(mask);
383 dma_cap_set(DMA_SLAVE, mask);
384
385 chan = dma_request_channel(mask, plat->dma_filter,
386 plat->dma_tx_param);
387 if (!chan) {
388 dev_err(uap->port.dev, "no TX DMA channel!\n");
389 return;
390 }
Russell King68b65f72010-12-22 17:24:39 +0000391 }
392
393 dmaengine_slave_config(chan, &tx_conf);
394 uap->dmatx.chan = chan;
395
396 dev_info(uap->port.dev, "DMA channel TX %s\n",
397 dma_chan_name(uap->dmatx.chan));
Linus Walleijead76f32011-02-24 13:21:08 +0100398
399 /* Optionally make use of an RX channel as well */
Arnd Bergmann787b0c12013-01-28 16:24:37 +0000400 chan = dma_request_slave_channel(dev, "rx");
Rob Herring0d3c6732014-04-18 17:19:57 -0500401
Arnd Bergmann787b0c12013-01-28 16:24:37 +0000402 if (!chan && plat->dma_rx_param) {
403 chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
404
405 if (!chan) {
406 dev_err(uap->port.dev, "no RX DMA channel!\n");
407 return;
408 }
409 }
410
411 if (chan) {
Linus Walleijead76f32011-02-24 13:21:08 +0100412 struct dma_slave_config rx_conf = {
Russell King9f25bc52015-11-03 14:51:13 +0000413 .src_addr = uap->port.mapbase +
414 pl011_reg_to_offset(uap, REG_DR),
Linus Walleijead76f32011-02-24 13:21:08 +0100415 .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
Vinod Koula485df42011-10-14 10:47:38 +0530416 .direction = DMA_DEV_TO_MEM,
Guennadi Liakhovetskib2aeb772014-04-12 19:47:17 +0200417 .src_maxburst = uap->fifosize >> 2,
Viresh Kumar258aea72012-02-01 16:12:19 +0530418 .device_fc = false,
Linus Walleijead76f32011-02-24 13:21:08 +0100419 };
Andrew Jackson2d3b7d62014-11-07 14:14:47 +0000420 struct dma_slave_caps caps;
Linus Walleijead76f32011-02-24 13:21:08 +0100421
Andrew Jackson2d3b7d62014-11-07 14:14:47 +0000422 /*
423 * Some DMA controllers provide information on their capabilities.
424 * If the controller does, check for suitable residue processing
425 * otherwise assime all is well.
426 */
427 if (0 == dma_get_slave_caps(chan, &caps)) {
428 if (caps.residue_granularity ==
429 DMA_RESIDUE_GRANULARITY_DESCRIPTOR) {
430 dma_release_channel(chan);
431 dev_info(uap->port.dev,
432 "RX DMA disabled - no residue processing\n");
433 return;
434 }
435 }
Linus Walleijead76f32011-02-24 13:21:08 +0100436 dmaengine_slave_config(chan, &rx_conf);
437 uap->dmarx.chan = chan;
438
Andrew Jackson98267d32014-11-07 14:14:23 +0000439 uap->dmarx.auto_poll_rate = false;
Greg Kroah-Hartman8f898bf2013-12-17 09:33:18 -0800440 if (plat && plat->dma_rx_poll_enable) {
Chanho Mincb06ff12013-03-27 18:38:11 +0900441 /* Set poll rate if specified. */
442 if (plat->dma_rx_poll_rate) {
443 uap->dmarx.auto_poll_rate = false;
444 uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
445 } else {
446 /*
447 * 100 ms defaults to poll rate if not
448 * specified. This will be adjusted with
449 * the baud rate at set_termios.
450 */
451 uap->dmarx.auto_poll_rate = true;
452 uap->dmarx.poll_rate = 100;
453 }
454 /* 3 secs defaults poll_timeout if not specified. */
455 if (plat->dma_rx_poll_timeout)
456 uap->dmarx.poll_timeout =
457 plat->dma_rx_poll_timeout;
458 else
459 uap->dmarx.poll_timeout = 3000;
Andrew Jackson98267d32014-11-07 14:14:23 +0000460 } else if (!plat && dev->of_node) {
461 uap->dmarx.auto_poll_rate = of_property_read_bool(
462 dev->of_node, "auto-poll");
463 if (uap->dmarx.auto_poll_rate) {
464 u32 x;
Chanho Mincb06ff12013-03-27 18:38:11 +0900465
Andrew Jackson98267d32014-11-07 14:14:23 +0000466 if (0 == of_property_read_u32(dev->of_node,
467 "poll-rate-ms", &x))
468 uap->dmarx.poll_rate = x;
469 else
470 uap->dmarx.poll_rate = 100;
471 if (0 == of_property_read_u32(dev->of_node,
472 "poll-timeout-ms", &x))
473 uap->dmarx.poll_timeout = x;
474 else
475 uap->dmarx.poll_timeout = 3000;
476 }
477 }
Linus Walleijead76f32011-02-24 13:21:08 +0100478 dev_info(uap->port.dev, "DMA channel RX %s\n",
479 dma_chan_name(uap->dmarx.chan));
480 }
Russell King68b65f72010-12-22 17:24:39 +0000481}
482
Russell King68b65f72010-12-22 17:24:39 +0000483static void pl011_dma_remove(struct uart_amba_port *uap)
484{
Russell King68b65f72010-12-22 17:24:39 +0000485 if (uap->dmatx.chan)
486 dma_release_channel(uap->dmatx.chan);
Linus Walleijead76f32011-02-24 13:21:08 +0100487 if (uap->dmarx.chan)
488 dma_release_channel(uap->dmarx.chan);
Russell King68b65f72010-12-22 17:24:39 +0000489}
490
Dave Martin734745c2015-03-04 12:27:33 +0000491/* Forward declare these for the refill routine */
Russell King68b65f72010-12-22 17:24:39 +0000492static int pl011_dma_tx_refill(struct uart_amba_port *uap);
Dave Martin734745c2015-03-04 12:27:33 +0000493static void pl011_start_tx_pio(struct uart_amba_port *uap);
Russell King68b65f72010-12-22 17:24:39 +0000494
495/*
496 * The current DMA TX buffer has been sent.
497 * Try to queue up another DMA buffer.
498 */
499static void pl011_dma_tx_callback(void *data)
500{
501 struct uart_amba_port *uap = data;
502 struct pl011_dmatx_data *dmatx = &uap->dmatx;
503 unsigned long flags;
504 u16 dmacr;
505
506 spin_lock_irqsave(&uap->port.lock, flags);
507 if (uap->dmatx.queued)
508 dma_unmap_sg(dmatx->chan->device->dev, &dmatx->sg, 1,
509 DMA_TO_DEVICE);
510
511 dmacr = uap->dmacr;
512 uap->dmacr = dmacr & ~UART011_TXDMAE;
Russell King9f25bc52015-11-03 14:51:13 +0000513 pl011_write(uap->dmacr, uap, REG_DMACR);
Russell King68b65f72010-12-22 17:24:39 +0000514
515 /*
516 * If TX DMA was disabled, it means that we've stopped the DMA for
517 * some reason (eg, XOFF received, or we want to send an X-char.)
518 *
519 * Note: we need to be careful here of a potential race between DMA
520 * and the rest of the driver - if the driver disables TX DMA while
521 * a TX buffer completing, we must update the tx queued status to
522 * get further refills (hence we check dmacr).
523 */
524 if (!(dmacr & UART011_TXDMAE) || uart_tx_stopped(&uap->port) ||
525 uart_circ_empty(&uap->port.state->xmit)) {
526 uap->dmatx.queued = false;
527 spin_unlock_irqrestore(&uap->port.lock, flags);
528 return;
529 }
530
Dave Martin734745c2015-03-04 12:27:33 +0000531 if (pl011_dma_tx_refill(uap) <= 0)
Russell King68b65f72010-12-22 17:24:39 +0000532 /*
533 * We didn't queue a DMA buffer for some reason, but we
534 * have data pending to be sent. Re-enable the TX IRQ.
535 */
Dave Martin734745c2015-03-04 12:27:33 +0000536 pl011_start_tx_pio(uap);
537
Russell King68b65f72010-12-22 17:24:39 +0000538 spin_unlock_irqrestore(&uap->port.lock, flags);
539}
540
/*
 * Try to refill the TX DMA buffer.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   1 if we queued up a TX DMA buffer.
 *   0 if we didn't want to handle this by DMA
 *  <0 on error
 */
static int pl011_dma_tx_refill(struct uart_amba_port *uap)
{
	struct pl011_dmatx_data *dmatx = &uap->dmatx;
	struct dma_chan *chan = dmatx->chan;
	struct dma_device *dma_dev = chan->device;
	struct dma_async_tx_descriptor *desc;
	struct circ_buf *xmit = &uap->port.state->xmit;
	unsigned int count;

	/*
	 * Try to avoid the overhead involved in using DMA if the
	 * transaction fits in the first half of the FIFO, by using
	 * the standard interrupt handling.  This ensures that we
	 * issue a uart_write_wakeup() at the appropriate time.
	 */
	count = uart_circ_chars_pending(xmit);
	if (count < (uap->fifosize >> 1)) {
		uap->dmatx.queued = false;
		return 0;
	}

	/*
	 * Bodge: don't send the last character by DMA, as this
	 * will prevent XON from notifying us to restart DMA.
	 */
	count -= 1;

	/* Else proceed to copy the TX chars to the DMA buffer and fire DMA */
	if (count > PL011_DMA_BUFFER_SIZE)
		count = PL011_DMA_BUFFER_SIZE;

	/*
	 * Copy from the circular xmit buffer into the linear bounce
	 * buffer, in one or two chunks depending on whether the data
	 * wraps around the end of the ring.
	 */
	if (xmit->tail < xmit->head)
		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], count);
	else {
		size_t first = UART_XMIT_SIZE - xmit->tail;
		size_t second;

		if (first > count)
			first = count;
		second = count - first;

		memcpy(&dmatx->buf[0], &xmit->buf[xmit->tail], first);
		if (second)
			memcpy(&dmatx->buf[first], &xmit->buf[0], second);
	}

	dmatx->sg.length = count;

	if (dma_map_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE) != 1) {
		uap->dmatx.queued = false;
		dev_dbg(uap->port.dev, "unable to map TX DMA\n");
		return -EBUSY;
	}

	desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
					     DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		/*
		 * If DMA cannot be used right now, we complete this
		 * transaction via IRQ and let the TTY layer retry.
		 */
		dev_dbg(uap->port.dev, "TX DMA busy\n");
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_tx_callback;
	desc->callback_param = uap;

	/* All errors should happen at prepare time */
	dmaengine_submit(desc);

	/* Fire the DMA transaction */
	dma_dev->device_issue_pending(chan);

	uap->dmacr |= UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmatx.queued = true;

	/*
	 * Now we know that DMA will fire, so advance the ring buffer
	 * with the stuff we just dispatched.
	 */
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	uap->port.icount.tx += count;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	return 1;
}
642
/*
 * We received a transmit interrupt without a pending X-char but with
 * pending characters.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want to use PIO to transmit
 *   true if we queued a DMA buffer
 */
static bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	if (!uap->using_tx_dma)
		return false;

	/*
	 * If we already have a TX buffer queued, but received a
	 * TX interrupt, it will be because we've just sent an X-char.
	 * Ensure the TX DMA is enabled and the TX IRQ is disabled.
	 */
	if (uap->dmatx.queued) {
		uap->dmacr |= UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}

	/*
	 * We don't have a TX buffer queued, so try to queue one.
	 * If we successfully queued a buffer, mask the TX IRQ.
	 */
	if (pl011_dma_tx_refill(uap) > 0) {
		uap->im &= ~UART011_TXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		return true;
	}
	return false;
}
680
681/*
682 * Stop the DMA transmit (eg, due to received XOFF).
683 * Locking: called with port lock held and IRQs disabled.
684 */
685static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
686{
687 if (uap->dmatx.queued) {
688 uap->dmacr &= ~UART011_TXDMAE;
Russell King9f25bc52015-11-03 14:51:13 +0000689 pl011_write(uap->dmacr, uap, REG_DMACR);
Russell King68b65f72010-12-22 17:24:39 +0000690 }
691}
692
/*
 * Try to start a DMA transmit, or in the case of an XON/OFF
 * character queued for send, try to get that character out ASAP.
 * Locking: called with port lock held and IRQs disabled.
 * Returns:
 *   false if we want the TX IRQ to be enabled
 *   true if we have a buffer queued
 */
static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	u16 dmacr;

	if (!uap->using_tx_dma)
		return false;

	if (!uap->port.x_char) {
		/* no X-char, try to push chars out in DMA mode */
		bool ret = true;

		if (!uap->dmatx.queued) {
			if (pl011_dma_tx_refill(uap) > 0) {
				uap->im &= ~UART011_TXIM;
				pl011_write(uap->im, uap, REG_IMSC);
			} else
				ret = false;
		} else if (!(uap->dmacr & UART011_TXDMAE)) {
			uap->dmacr |= UART011_TXDMAE;
			pl011_write(uap->dmacr, uap, REG_DMACR);
		}
		return ret;
	}

	/*
	 * We have an X-char to send.  Disable DMA to prevent it loading
	 * the TX fifo, and then see if we can stuff it into the FIFO.
	 */
	dmacr = uap->dmacr;
	uap->dmacr &= ~UART011_TXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	if (pl011_read(uap, REG_FR) & UART01x_FR_TXFF) {
		/*
		 * No space in the FIFO, so enable the transmit interrupt
		 * so we know when there is space.  Note that once we've
		 * loaded the character, we should just re-enable DMA.
		 */
		return false;
	}

	pl011_write(uap->port.x_char, uap, REG_DR);
	uap->port.icount.tx++;
	uap->port.x_char = 0;

	/* Success - restore the DMA state */
	uap->dmacr = dmacr;
	pl011_write(dmacr, uap, REG_DMACR);

	return true;
}
752
/*
 * Flush the transmit buffer.
 * Locking: called with port lock held and IRQs disabled.
 * The lock is dropped around dmaengine_terminate_all() to avoid
 * deadlocking against the DMA completion callback, which also takes it.
 */
static void pl011_dma_flush_buffer(struct uart_port *port)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	if (!uap->using_tx_dma)
		return;

	/* Avoid deadlock with the DMA engine callback */
	spin_unlock(&uap->port.lock);
	dmaengine_terminate_all(uap->dmatx.chan);
	spin_lock(&uap->port.lock);
	if (uap->dmatx.queued) {
		dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
			     DMA_TO_DEVICE);
		uap->dmatx.queued = false;
		uap->dmacr &= ~UART011_TXDMAE;
		pl011_write(uap->dmacr, uap, REG_DMACR);
	}
}
779
Linus Walleijead76f32011-02-24 13:21:08 +0100780static void pl011_dma_rx_callback(void *data);
781
/*
 * Queue a DMA receive job into the currently selected buffer (A or B)
 * and enable RX DMA in the UART.  Returns 0 on success, -EIO when no
 * RX channel exists, or -EBUSY when the engine cannot prepare a
 * descriptor; on error the callers fall back to interrupt mode.
 */
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	struct dma_chan *rxchan = uap->dmarx.chan;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_async_tx_descriptor *desc;
	struct pl011_sgbuf *sgbuf;

	if (!rxchan)
		return -EIO;

	/* Start the RX DMA job */
	sgbuf = uap->dmarx.use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
					DMA_DEV_TO_MEM,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	/*
	 * If the DMA engine is busy and cannot prepare a
	 * channel, no big deal, the driver will fall back
	 * to interrupt mode as a result of this error code.
	 */
	if (!desc) {
		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		return -EBUSY;
	}

	/* Some data to go along to the callback */
	desc->callback = pl011_dma_rx_callback;
	desc->callback_param = uap;
	dmarx->cookie = dmaengine_submit(desc);
	dma_async_issue_pending(rxchan);

	uap->dmacr |= UART011_RXDMAE;
	pl011_write(uap->dmacr, uap, REG_DMACR);
	uap->dmarx.running = true;

	/* Mask the RX interrupt: DMA now services the receive stream */
	uap->im &= ~UART011_RXIM;
	pl011_write(uap->im, uap, REG_IMSC);

	return 0;
}
824
825/*
826 * This is called when either the DMA job is complete, or
827 * the FIFO timeout interrupt occurred. This must be called
828 * with the port spinlock uap->port.lock held.
829 */
static void pl011_dma_rx_chars(struct uart_amba_port *uap,
			       u32 pending, bool use_buf_b,
			       bool readfifo)
{
	struct tty_port *port = &uap->port.state->port;
	struct pl011_sgbuf *sgbuf = use_buf_b ?
		&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	int dma_count = 0;
	u32 fifotaken = 0; /* only used for vdbg() */

	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	int dmataken = 0;

	if (uap->dmarx.poll_rate) {
		/*
		 * The data can be taken by polling: the poll timer may have
		 * already pushed part of this buffer, so subtract what it
		 * consumed (tracked via last_residue) from 'pending'.
		 */
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		/* Recalculate the pending size */
		if (pending >= dmataken)
			pending -= dmataken;
	}

	/* Pick the remain data from the DMA */
	if (pending) {

		/*
		 * First take all chars in the DMA pipe, then look in the FIFO.
		 * Note that tty_insert_flip_buf() tries to take as many chars
		 * as it can.
		 */
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				pending);

		uap->port.icount.rx += dma_count;
		if (dma_count < pending)
			dev_warn(uap->port.dev,
				 "couldn't insert all characters (TTY is full?)\n");
	}

	/* Reset the last_residue for Rx DMA poll */
	if (uap->dmarx.poll_rate)
		dmarx->last_residue = sgbuf->sg.length;

	/*
	 * Only continue with trying to read the FIFO if all DMA chars have
	 * been taken first.
	 */
	if (dma_count == pending && readfifo) {
		/* Clear any error flags */
		pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
			    UART011_FEIS, uap, REG_ICR);

		/*
		 * If we read all the DMA'd characters, and we had an
		 * incomplete buffer, that could be due to an rx error, or
		 * maybe we just timed out. Read any pending chars and check
		 * the error status.
		 *
		 * Error conditions will only occur in the FIFO, these will
		 * trigger an immediate interrupt and stop the DMA job, so we
		 * will always find the error in the FIFO, never in the DMA
		 * buffer.
		 */
		fifotaken = pl011_fifo_to_tty(uap);
	}

	/* Drop the port lock while pushing into the TTY layer */
	spin_unlock(&uap->port.lock);
	dev_vdbg(uap->port.dev,
		 "Took %d chars from DMA buffer and %d chars from the FIFO\n",
		 dma_count, fifotaken);
	tty_flip_buffer_push(port);
	spin_lock(&uap->port.lock);
}
902
903static void pl011_dma_rx_irq(struct uart_amba_port *uap)
904{
905 struct pl011_dmarx_data *dmarx = &uap->dmarx;
906 struct dma_chan *rxchan = dmarx->chan;
907 struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
908 &dmarx->sgbuf_b : &dmarx->sgbuf_a;
909 size_t pending;
910 struct dma_tx_state state;
911 enum dma_status dmastat;
912
913 /*
914 * Pause the transfer so we can trust the current counter,
915 * do this before we pause the PL011 block, else we may
916 * overflow the FIFO.
917 */
918 if (dmaengine_pause(rxchan))
919 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
920 dmastat = rxchan->device->device_tx_status(rxchan,
921 dmarx->cookie, &state);
922 if (dmastat != DMA_PAUSED)
923 dev_err(uap->port.dev, "unable to pause DMA transfer\n");
924
925 /* Disable RX DMA - incoming data will wait in the FIFO */
926 uap->dmacr &= ~UART011_RXDMAE;
Russell King9f25bc52015-11-03 14:51:13 +0000927 pl011_write(uap->dmacr, uap, REG_DMACR);
Linus Walleijead76f32011-02-24 13:21:08 +0100928 uap->dmarx.running = false;
929
930 pending = sgbuf->sg.length - state.residue;
931 BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
932 /* Then we terminate the transfer - we now know our residue */
933 dmaengine_terminate_all(rxchan);
934
935 /*
936 * This will take the chars we have so far and insert
937 * into the framework.
938 */
939 pl011_dma_rx_chars(uap, pending, dmarx->use_buf_b, true);
940
941 /* Switch buffer & re-trigger DMA job */
942 dmarx->use_buf_b = !dmarx->use_buf_b;
943 if (pl011_dma_rx_trigger_dma(uap)) {
944 dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
945 "fall back to interrupt mode\n");
946 uap->im |= UART011_RXIM;
Russell King9f25bc52015-11-03 14:51:13 +0000947 pl011_write(uap->im, uap, REG_IMSC);
Linus Walleijead76f32011-02-24 13:21:08 +0100948 }
949}
950
/*
 * DMA completion callback: the current RX buffer filled up completely
 * before a receive timeout fired.  Swap to the other buffer, restart
 * DMA immediately, and push the filled buffer's contents to the TTY.
 */
static void pl011_dma_rx_callback(void *data)
{
	struct uart_amba_port *uap = data;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = dmarx->chan;
	bool lastbuf = dmarx->use_buf_b;
	struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
		&dmarx->sgbuf_b : &dmarx->sgbuf_a;
	size_t pending;
	struct dma_tx_state state;
	int ret;

	/*
	 * This completion interrupt occurs typically when the
	 * RX buffer is totally stuffed but no timeout has yet
	 * occurred. When that happens, we just want the RX
	 * routine to flush out the secondary DMA buffer while
	 * we immediately trigger the next DMA job.
	 */
	spin_lock_irq(&uap->port.lock);
	/*
	 * Rx data can be taken by the UART interrupts during
	 * the DMA irq handler. So we check the residue here.
	 */
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	pending = sgbuf->sg.length - state.residue;
	BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
	/* Then we terminate the transfer - we now know our residue */
	dmaengine_terminate_all(rxchan);

	/* Flip to the other buffer and restart DMA before draining this one */
	uap->dmarx.running = false;
	dmarx->use_buf_b = !lastbuf;
	ret = pl011_dma_rx_trigger_dma(uap);

	pl011_dma_rx_chars(uap, pending, lastbuf, false);
	spin_unlock_irq(&uap->port.lock);
	/*
	 * Do this check after we picked the DMA chars so we don't
	 * get some IRQ immediately from RX.
	 */
	if (ret) {
		dev_dbg(uap->port.dev, "could not retrigger RX DMA job "
			"fall back to interrupt mode\n");
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
	}
}
998
999/*
1000 * Stop accepting received characters, when we're shutting down or
1001 * suspending this port.
1002 * Locking: called with port lock held and IRQs disabled.
1003 */
1004static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
1005{
1006 /* FIXME. Just disable the DMA enable */
1007 uap->dmacr &= ~UART011_RXDMAE;
Russell King9f25bc52015-11-03 14:51:13 +00001008 pl011_write(uap->dmacr, uap, REG_DMACR);
Linus Walleijead76f32011-02-24 13:21:08 +01001009}
Russell King68b65f72010-12-22 17:24:39 +00001010
Chanho Mincb06ff12013-03-27 18:38:11 +09001011/*
1012 * Timer handler for Rx DMA polling.
1013 * Every polling, It checks the residue in the dma buffer and transfer
1014 * data to the tty. Also, last_residue is updated for the next polling.
1015 */
static void pl011_dma_rx_poll(unsigned long args)
{
	struct uart_amba_port *uap = (struct uart_amba_port *)args;
	struct tty_port *port = &uap->port.state->port;
	struct pl011_dmarx_data *dmarx = &uap->dmarx;
	struct dma_chan *rxchan = uap->dmarx.chan;
	unsigned long flags = 0;
	unsigned int dmataken = 0;
	unsigned int size = 0;
	struct pl011_sgbuf *sgbuf;
	int dma_count;
	struct dma_tx_state state;

	/*
	 * A shrinking residue means new data has landed in the current
	 * buffer since the last poll; push just the delta to the TTY.
	 */
	sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
	rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
	if (likely(state.residue < dmarx->last_residue)) {
		dmataken = sgbuf->sg.length - dmarx->last_residue;
		size = dmarx->last_residue - state.residue;
		dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
				size);
		/* Only advance last_residue when the TTY took everything */
		if (dma_count == size)
			dmarx->last_residue = state.residue;
		dmarx->last_jiffies = jiffies;
	}
	tty_flip_buffer_push(port);

	/*
	 * If no data is received in poll_timeout, the driver will fall back
	 * to interrupt mode. We will retrigger DMA at the first interrupt.
	 */
	if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
			> uap->dmarx.poll_timeout) {

		spin_lock_irqsave(&uap->port.lock, flags);
		pl011_dma_rx_stop(uap);
		uap->im |= UART011_RXIM;
		pl011_write(uap->im, uap, REG_IMSC);
		spin_unlock_irqrestore(&uap->port.lock, flags);

		uap->dmarx.running = false;
		dmaengine_terminate_all(rxchan);
		del_timer(&uap->dmarx.timer);
	} else {
		/* Data still flowing: schedule the next poll */
		mod_timer(&uap->dmarx.timer,
			jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
	}
}
1063
/*
 * Set up DMA for a port being opened: allocate the TX bounce buffer,
 * map the two RX scatter buffers, enable DMA-on-error and kick off the
 * first RX job (plus the polling timer when poll_rate is set).  Any
 * failure simply leaves the affected direction in interrupt mode.
 */
static void pl011_dma_startup(struct uart_amba_port *uap)
{
	int ret;

	if (!uap->dma_probed)
		pl011_dma_probe(uap);

	if (!uap->dmatx.chan)
		return;

	/* __GFP_DMA: the TX bounce buffer must come from a DMA-capable zone */
	uap->dmatx.buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL | __GFP_DMA);
	if (!uap->dmatx.buf) {
		dev_err(uap->port.dev, "no memory for DMA TX buffer\n");
		uap->port.fifosize = uap->fifosize;
		return;
	}

	sg_init_one(&uap->dmatx.sg, uap->dmatx.buf, PL011_DMA_BUFFER_SIZE);

	/* The DMA buffer is now the FIFO the TTY subsystem can use */
	uap->port.fifosize = PL011_DMA_BUFFER_SIZE;
	uap->using_tx_dma = true;

	if (!uap->dmarx.chan)
		goto skip_rx;

	/* Allocate and map DMA RX buffers */
	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer A", ret);
		goto skip_rx;
	}

	ret = pl011_sgbuf_init(uap->dmarx.chan, &uap->dmarx.sgbuf_b,
			       DMA_FROM_DEVICE);
	if (ret) {
		dev_err(uap->port.dev, "failed to init DMA %s: %d\n",
			"RX buffer B", ret);
		/* Buffer B failed: release A too, RX stays in interrupt mode */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a,
				 DMA_FROM_DEVICE);
		goto skip_rx;
	}

	uap->using_rx_dma = true;

skip_rx:
	/* Turn on DMA error (RX/TX will be enabled on demand) */
	uap->dmacr |= UART011_DMAONERR;
	pl011_write(uap->dmacr, uap, REG_DMACR);

	/*
	 * ST Micro variants has some specific dma burst threshold
	 * compensation. Set this to 16 bytes, so burst will only
	 * be issued above/below 16 bytes.
	 */
	if (uap->vendor->dma_threshold)
		pl011_write(ST_UART011_DMAWM_RX_16 | ST_UART011_DMAWM_TX_16,
			    uap, REG_ST_DMAWM);

	if (uap->using_rx_dma) {
		if (pl011_dma_rx_trigger_dma(uap))
			dev_dbg(uap->port.dev, "could not trigger initial "
				"RX DMA job, fall back to interrupt mode\n");
		if (uap->dmarx.poll_rate) {
			init_timer(&(uap->dmarx.timer));
			uap->dmarx.timer.function = pl011_dma_rx_poll;
			uap->dmarx.timer.data = (unsigned long)uap;
			mod_timer(&uap->dmarx.timer,
				jiffies +
				msecs_to_jiffies(uap->dmarx.poll_rate));
			uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
			uap->dmarx.last_jiffies = jiffies;
		}
	}
}
1141
/*
 * Tear down DMA when the port is shut down: wait for the transmitter
 * to drain, disable all DMA in the UART, abort in-flight jobs and
 * release the TX bounce buffer, RX buffers and the poll timer.
 */
static void pl011_dma_shutdown(struct uart_amba_port *uap)
{
	if (!(uap->using_tx_dma || uap->using_rx_dma))
		return;

	/* Disable RX and TX DMA */
	/* NOTE: busy-waits with no timeout until the BUSY flag clears */
	while (pl011_read(uap, REG_FR) & UART01x_FR_BUSY)
		barrier();

	spin_lock_irq(&uap->port.lock);
	uap->dmacr &= ~(UART011_DMAONERR | UART011_RXDMAE | UART011_TXDMAE);
	pl011_write(uap->dmacr, uap, REG_DMACR);
	spin_unlock_irq(&uap->port.lock);

	if (uap->using_tx_dma) {
		/* In theory, this should already be done by pl011_dma_flush_buffer */
		dmaengine_terminate_all(uap->dmatx.chan);
		if (uap->dmatx.queued) {
			dma_unmap_sg(uap->dmatx.chan->device->dev, &uap->dmatx.sg, 1,
				     DMA_TO_DEVICE);
			uap->dmatx.queued = false;
		}

		kfree(uap->dmatx.buf);
		uap->using_tx_dma = false;
	}

	if (uap->using_rx_dma) {
		dmaengine_terminate_all(uap->dmarx.chan);
		/* Clean up the RX DMA */
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
		pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
		if (uap->dmarx.poll_rate)
			del_timer_sync(&uap->dmarx.timer);
		uap->using_rx_dma = false;
	}
}
1179
Linus Walleijead76f32011-02-24 13:21:08 +01001180static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
1181{
1182 return uap->using_rx_dma;
1183}
1184
1185static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
1186{
1187 return uap->using_rx_dma && uap->dmarx.running;
1188}
1189
Russell King68b65f72010-12-22 17:24:39 +00001190#else
/*
 * Blank functions if the DMA engine is not available: without
 * CONFIG_DMA_ENGINE every DMA query collapses to "not available"
 * and the driver runs in pure interrupt-driven PIO mode.
 */
static inline void pl011_dma_probe(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_remove(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_startup(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_shutdown(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_irq(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_tx_stop(struct uart_amba_port *uap)
{
}

static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
{
	return false;
}

static inline void pl011_dma_rx_irq(struct uart_amba_port *uap)
{
}

static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
{
}

static inline int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
	return -EIO;
}

static inline bool pl011_dma_rx_available(struct uart_amba_port *uap)
{
	return false;
}

static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
{
	return false;
}

/* No TX DMA bounce buffer exists, so there is nothing to flush */
#define pl011_dma_flush_buffer	NULL
1246#endif
1247
Russell Kingb129a8c2005-08-31 10:12:14 +01001248static void pl011_stop_tx(struct uart_port *port)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001249{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001250 struct uart_amba_port *uap =
1251 container_of(port, struct uart_amba_port, port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
1253 uap->im &= ~UART011_TXIM;
Russell King9f25bc52015-11-03 14:51:13 +00001254 pl011_write(uap->im, uap, REG_IMSC);
Russell King68b65f72010-12-22 17:24:39 +00001255 pl011_dma_tx_stop(uap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256}
1257
Dave Martin1e84d222015-04-27 16:49:05 +01001258static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq);
Dave Martin734745c2015-03-04 12:27:33 +00001259
1260/* Start TX with programmed I/O only (no DMA) */
1261static void pl011_start_tx_pio(struct uart_amba_port *uap)
1262{
1263 uap->im |= UART011_TXIM;
Russell King9f25bc52015-11-03 14:51:13 +00001264 pl011_write(uap->im, uap, REG_IMSC);
Dave Martin1e84d222015-04-27 16:49:05 +01001265 pl011_tx_chars(uap, false);
Dave Martin734745c2015-03-04 12:27:33 +00001266}
1267
Russell Kingb129a8c2005-08-31 10:12:14 +01001268static void pl011_start_tx(struct uart_port *port)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001270 struct uart_amba_port *uap =
1271 container_of(port, struct uart_amba_port, port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Dave Martin734745c2015-03-04 12:27:33 +00001273 if (!pl011_dma_tx_start(uap))
1274 pl011_start_tx_pio(uap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275}
1276
/*
 * Stop reception: mask the RX data, receive-timeout and all error
 * interrupts, then halt any running RX DMA job.
 */
static void pl011_stop_rx(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	uap->im &= ~(UART011_RXIM|UART011_RTIM|UART011_FEIM|
		     UART011_PEIM|UART011_BEIM|UART011_OEIM);
	pl011_write(uap->im, uap, REG_IMSC);

	pl011_dma_rx_stop(uap);
}
1288
1289static void pl011_enable_ms(struct uart_port *port)
1290{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001291 struct uart_amba_port *uap =
1292 container_of(port, struct uart_amba_port, port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293
1294 uap->im |= UART011_RIMIM|UART011_CTSMIM|UART011_DCDMIM|UART011_DSRMIM;
Russell King9f25bc52015-11-03 14:51:13 +00001295 pl011_write(uap->im, uap, REG_IMSC);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296}
1297
/*
 * Interrupt-mode receive: drain the RX FIFO into the TTY layer, then,
 * if RX DMA is available, try to switch back to DMA mode (re-arming the
 * poll timer on success).  The port lock is dropped around the
 * flip-buffer push, as the sparse annotations declare.
 */
static void pl011_rx_chars(struct uart_amba_port *uap)
__releases(&uap->port.lock)
__acquires(&uap->port.lock)
{
	pl011_fifo_to_tty(uap);

	spin_unlock(&uap->port.lock);
	tty_flip_buffer_push(&uap->port.state->port);
	/*
	 * If we were temporarily out of DMA mode for a while,
	 * attempt to switch back to DMA mode again.
	 */
	if (pl011_dma_rx_available(uap)) {
		if (pl011_dma_rx_trigger_dma(uap)) {
			dev_dbg(uap->port.dev, "could not trigger RX DMA job "
				"fall back to interrupt mode again\n");
			uap->im |= UART011_RXIM;
			pl011_write(uap->im, uap, REG_IMSC);
		} else {
#ifdef CONFIG_DMA_ENGINE
			/* Start Rx DMA poll */
			if (uap->dmarx.poll_rate) {
				uap->dmarx.last_jiffies = jiffies;
				uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
				mod_timer(&uap->dmarx.timer,
					jiffies +
					msecs_to_jiffies(uap->dmarx.poll_rate));
			}
#endif
		}
	}
	spin_lock(&uap->port.lock);
}
1331
Dave Martin1e84d222015-04-27 16:49:05 +01001332static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c,
1333 bool from_irq)
Dave Martin734745c2015-03-04 12:27:33 +00001334{
Dave Martin1e84d222015-04-27 16:49:05 +01001335 if (unlikely(!from_irq) &&
Russell King9f25bc52015-11-03 14:51:13 +00001336 pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
Dave Martin1e84d222015-04-27 16:49:05 +01001337 return false; /* unable to transmit character */
1338
Russell King9f25bc52015-11-03 14:51:13 +00001339 pl011_write(c, uap, REG_DR);
Dave Martin734745c2015-03-04 12:27:33 +00001340 uap->port.icount.tx++;
1341
Dave Martin1e84d222015-04-27 16:49:05 +01001342 return true;
Dave Martin734745c2015-03-04 12:27:33 +00001343}
1344
/*
 * Fill the TX FIFO from the pending x_char and the circular transmit
 * buffer.  From IRQ context at most half a FIFO's worth (fifosize >> 1)
 * is written per call; outside IRQ context the FIFO-full flag stops us.
 * Wakes up writers and stops TX when the buffer empties.
 */
static void pl011_tx_chars(struct uart_amba_port *uap, bool from_irq)
{
	struct circ_buf *xmit = &uap->port.state->xmit;
	int count = uap->fifosize >> 1;

	/* A pending XON/XOFF character always goes out first */
	if (uap->port.x_char) {
		if (!pl011_tx_char(uap, uap->port.x_char, from_irq))
			return;
		uap->port.x_char = 0;
		--count;
	}
	if (uart_circ_empty(xmit) || uart_tx_stopped(&uap->port)) {
		pl011_stop_tx(&uap->port);
		return;
	}

	/* If we are using DMA mode, try to send some characters. */
	if (pl011_dma_tx_irq(uap))
		return;

	do {
		if (likely(from_irq) && count-- == 0)
			break;

		if (!pl011_tx_char(uap, xmit->buf[xmit->tail], from_irq))
			break;

		xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	} while (!uart_circ_empty(xmit));

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&uap->port);

	if (uart_circ_empty(xmit))
		pl011_stop_tx(&uap->port);
}
1381
/*
 * Process a modem-status interrupt: compare the current FR modem bits
 * against the last snapshot, notify the serial core / bump counters
 * for each changed line, and wake waiters on delta_msr_wait.
 */
static void pl011_modem_status(struct uart_amba_port *uap)
{
	unsigned int status, delta;

	status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;

	/* delta holds the lines that changed since the previous snapshot */
	delta = status ^ uap->old_status;
	uap->old_status = status;

	if (!delta)
		return;

	if (delta & UART01x_FR_DCD)
		uart_handle_dcd_change(&uap->port, status & UART01x_FR_DCD);

	if (delta & UART01x_FR_DSR)
		uap->port.icount.dsr++;

	if (delta & UART01x_FR_CTS)
		uart_handle_cts_change(&uap->port, status & UART01x_FR_CTS);

	wake_up_interruptible(&uap->port.state->port.delta_msr_wait);
}
1405
/*
 * Vendor workaround for parts that set cts_event_workaround: a dummy
 * ICR write followed by two dummy ICR reads provides the APB delay
 * needed before the real write-1-to-clear in the interrupt handler.
 */
static void check_apply_cts_event_workaround(struct uart_amba_port *uap)
{
	unsigned int dummy_read;

	if (!uap->vendor->cts_event_workaround)
		return;

	/* workaround to make sure that all bits are unlocked.. */
	pl011_write(0x00, uap, REG_ICR);

	/*
	 * WA: introduce 26ns(1 uart clk) delay before W1C;
	 * single apb access will incur 2 pclk(133.12Mhz) delay,
	 * so add 2 dummy reads
	 */
	dummy_read = pl011_read(uap, REG_ICR);
	dummy_read = pl011_read(uap, REG_ICR);
}
1424
/*
 * Main interrupt handler.  Loops while any unmasked interrupt remains
 * asserted (bounded by AMBA_ISR_PASS_LIMIT), dispatching RX (DMA or
 * PIO), modem-status and TX work.  TXIS/RTIS/RXIS are deliberately not
 * cleared via ICR here; they are dealt with by servicing the FIFOs.
 */
static irqreturn_t pl011_int(int irq, void *dev_id)
{
	struct uart_amba_port *uap = dev_id;
	unsigned long flags;
	unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
	u16 imsc;
	int handled = 0;

	spin_lock_irqsave(&uap->port.lock, flags);
	imsc = pl011_read(uap, REG_IMSC);
	/* Raw status masked by what we have enabled */
	status = pl011_read(uap, REG_RIS) & imsc;
	if (status) {
		do {
			check_apply_cts_event_workaround(uap);

			/* Ack everything except the TX/RX/timeout sources */
			pl011_write(status & ~(UART011_TXIS|UART011_RTIS|
					       UART011_RXIS),
				    uap, REG_ICR);

			if (status & (UART011_RTIS|UART011_RXIS)) {
				if (pl011_dma_rx_running(uap))
					pl011_dma_rx_irq(uap);
				else
					pl011_rx_chars(uap);
			}
			if (status & (UART011_DSRMIS|UART011_DCDMIS|
				      UART011_CTSMIS|UART011_RIMIS))
				pl011_modem_status(uap);
			if (status & UART011_TXIS)
				pl011_tx_chars(uap, true);

			/* Bail out if the UART keeps raising interrupts */
			if (pass_counter-- == 0)
				break;

			status = pl011_read(uap, REG_RIS) & imsc;
		} while (status != 0);
		handled = 1;
	}

	spin_unlock_irqrestore(&uap->port.lock, flags);

	return IRQ_RETVAL(handled);
}
1468
Linus Walleije643f872012-06-17 15:44:19 +02001469static unsigned int pl011_tx_empty(struct uart_port *port)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001471 struct uart_amba_port *uap =
1472 container_of(port, struct uart_amba_port, port);
Russell King9f25bc52015-11-03 14:51:13 +00001473 unsigned int status = pl011_read(uap, REG_FR);
Greg Kroah-Hartman062a68a2015-09-04 09:11:24 -07001474 return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475}
1476
Linus Walleije643f872012-06-17 15:44:19 +02001477static unsigned int pl011_get_mctrl(struct uart_port *port)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001479 struct uart_amba_port *uap =
1480 container_of(port, struct uart_amba_port, port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 unsigned int result = 0;
Russell King9f25bc52015-11-03 14:51:13 +00001482 unsigned int status = pl011_read(uap, REG_FR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483
Jiri Slaby5159f402007-10-18 23:40:31 -07001484#define TIOCMBIT(uartbit, tiocmbit) \
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 if (status & uartbit) \
1486 result |= tiocmbit
1487
Jiri Slaby5159f402007-10-18 23:40:31 -07001488 TIOCMBIT(UART01x_FR_DCD, TIOCM_CAR);
Greg Kroah-Hartman062a68a2015-09-04 09:11:24 -07001489 TIOCMBIT(UART01x_FR_DSR, TIOCM_DSR);
1490 TIOCMBIT(UART01x_FR_CTS, TIOCM_CTS);
1491 TIOCMBIT(UART011_FR_RI, TIOCM_RNG);
Jiri Slaby5159f402007-10-18 23:40:31 -07001492#undef TIOCMBIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 return result;
1494}
1495
/*
 * Push the requested modem-control state into the CR register: each
 * TIOCM_* flag sets or clears the matching CR bit.  When auto-RTS is
 * active, RTSEN is mirrored from TIOCM_RTS so RTS can actually be
 * dropped.
 */
static void pl011_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int cr;

	cr = pl011_read(uap, REG_CR);

/* Set or clear a CR bit according to the corresponding TIOCM flag */
#define TIOCMBIT(tiocmbit, uartbit)		\
	if (mctrl & tiocmbit)		\
		cr |= uartbit;		\
	else				\
		cr &= ~uartbit

	TIOCMBIT(TIOCM_RTS, UART011_CR_RTS);
	TIOCMBIT(TIOCM_DTR, UART011_CR_DTR);
	TIOCMBIT(TIOCM_OUT1, UART011_CR_OUT1);
	TIOCMBIT(TIOCM_OUT2, UART011_CR_OUT2);
	TIOCMBIT(TIOCM_LOOP, UART011_CR_LBE);

	if (uap->autorts) {
		/* We need to disable auto-RTS if we want to turn RTS off */
		TIOCMBIT(TIOCM_RTS, UART011_CR_RTSEN);
	}
#undef TIOCMBIT

	pl011_write(cr, uap, REG_CR);
}
1524
1525static void pl011_break_ctl(struct uart_port *port, int break_state)
1526{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001527 struct uart_amba_port *uap =
1528 container_of(port, struct uart_amba_port, port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 unsigned long flags;
1530 unsigned int lcr_h;
1531
1532 spin_lock_irqsave(&uap->port.lock, flags);
Russell Kinge4df9a82015-11-16 17:40:41 +00001533 lcr_h = pl011_read(uap, REG_LCRH_TX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001534 if (break_state == -1)
1535 lcr_h |= UART01x_LCRH_BRK;
1536 else
1537 lcr_h &= ~UART01x_LCRH_BRK;
Russell Kinge4df9a82015-11-16 17:40:41 +00001538 pl011_write(lcr_h, uap, REG_LCRH_TX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 spin_unlock_irqrestore(&uap->port.lock, flags);
1540}
1541
Jason Wessel84b5ae12008-02-20 13:33:39 -06001542#ifdef CONFIG_CONSOLE_POLL
Anton Vorontsov5c8124a2012-09-24 14:27:55 -07001543
/*
 * Silence the UART's interrupt sources for polled (e.g. KDB) use:
 * acknowledge everything currently pending, then mask the TX
 * interrupt, which cannot be cleared while the FIFO has room.
 */
static void pl011_quiesce_irqs(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	/* Ack every interrupt that is currently asserted (MIS -> ICR). */
	pl011_write(pl011_read(uap, REG_MIS), uap, REG_ICR);
	/*
	 * There is no way to clear TXIM as this is "ready to transmit IRQ", so
	 * we simply mask it. start_tx() will unmask it.
	 *
	 * Note we can race with start_tx(), and if the race happens, the
	 * polling user might get another interrupt just after we clear it.
	 * But it should be OK and can happen even w/o the race, e.g.
	 * controller immediately got some new data and raised the IRQ.
	 *
	 * And whoever uses polling routines assumes that it manages the device
	 * (including tx queue), so we're also fine with start_tx()'s caller
	 * side.
	 */
	pl011_write(pl011_read(uap, REG_IMSC) & ~UART011_TXIM, uap,
		    REG_IMSC);
}
1566
Linus Walleije643f872012-06-17 15:44:19 +02001567static int pl011_get_poll_char(struct uart_port *port)
Jason Wessel84b5ae12008-02-20 13:33:39 -06001568{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001569 struct uart_amba_port *uap =
1570 container_of(port, struct uart_amba_port, port);
Jason Wessel84b5ae12008-02-20 13:33:39 -06001571 unsigned int status;
1572
Anton Vorontsov5c8124a2012-09-24 14:27:55 -07001573 /*
1574 * The caller might need IRQs lowered, e.g. if used with KDB NMI
1575 * debugger.
1576 */
1577 pl011_quiesce_irqs(port);
1578
Russell King9f25bc52015-11-03 14:51:13 +00001579 status = pl011_read(uap, REG_FR);
Jason Wesself5316b42010-05-20 21:04:22 -05001580 if (status & UART01x_FR_RXFE)
1581 return NO_POLL_CHAR;
Jason Wessel84b5ae12008-02-20 13:33:39 -06001582
Russell King9f25bc52015-11-03 14:51:13 +00001583 return pl011_read(uap, REG_DR);
Jason Wessel84b5ae12008-02-20 13:33:39 -06001584}
1585
Linus Walleije643f872012-06-17 15:44:19 +02001586static void pl011_put_poll_char(struct uart_port *port,
Jason Wessel84b5ae12008-02-20 13:33:39 -06001587 unsigned char ch)
1588{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001589 struct uart_amba_port *uap =
1590 container_of(port, struct uart_amba_port, port);
Jason Wessel84b5ae12008-02-20 13:33:39 -06001591
Russell King9f25bc52015-11-03 14:51:13 +00001592 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
Jason Wessel84b5ae12008-02-20 13:33:39 -06001593 barrier();
1594
Russell King9f25bc52015-11-03 14:51:13 +00001595 pl011_write(ch, uap, REG_DR);
Jason Wessel84b5ae12008-02-20 13:33:39 -06001596}
1597
1598#endif /* CONFIG_CONSOLE_POLL */
1599
/*
 * Bring the hardware to a usable baseline: pins muxed, clock running,
 * pending error/RX interrupts cleared, and the previous interrupt mask
 * saved in uap->im.  Shared by normal startup and console polling init.
 * Returns 0 on success or the clk_prepare_enable() error; on failure the
 * clock is left disabled.
 */
static int pl011_hwinit(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	int retval;

	/* Optionally enable pins to be muxed in and configured */
	pinctrl_pm_select_default_state(port->dev);

	/*
	 * Try to enable the clock producer.
	 */
	retval = clk_prepare_enable(uap->clk);
	if (retval)
		return retval;

	/* The baud divisors are derived from the actual clock rate. */
	uap->port.uartclk = clk_get_rate(uap->clk);

	/* Clear pending error and receive interrupts */
	pl011_write(UART011_OEIS | UART011_BEIS | UART011_PEIS |
		    UART011_FEIS | UART011_RTIS | UART011_RXIS,
		    uap, REG_ICR);

	/*
	 * Save interrupts enable mask, and enable RX interrupts in case if
	 * the interrupt is used for NMI entry.
	 */
	uap->im = pl011_read(uap, REG_IMSC);
	pl011_write(UART011_RTIM | UART011_RXIM, uap, REG_IMSC);

	/* Give the platform a chance to run its own init hook. */
	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}
	return 0;
}
1639
Russell King7fe9a5a2015-11-03 14:51:08 +00001640static bool pl011_split_lcrh(const struct uart_amba_port *uap)
1641{
Russell Kinge4df9a82015-11-16 17:40:41 +00001642 return pl011_reg_to_offset(uap, REG_LCRH_RX) !=
1643 pl011_reg_to_offset(uap, REG_LCRH_TX);
Russell King7fe9a5a2015-11-03 14:51:08 +00001644}
1645
/*
 * Write the line-control value.  On variants with split LCRH registers
 * both copies must be updated, with a mandatory delay in between.
 */
static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
{
	pl011_write(lcr_h, uap, REG_LCRH_RX);
	if (pl011_split_lcrh(uap)) {
		int i;
		/*
		 * Wait 10 PCLKs before writing LCRH_TX register,
		 * to get this delay write read only register 10 times
		 */
		for (i = 0; i < 10; ++i)
			pl011_write(0xff, uap, REG_MIS);
		pl011_write(lcr_h, uap, REG_LCRH_TX);
	}
}
1660
Andre Przywara867b8e82015-05-21 17:26:15 +01001661static int pl011_allocate_irq(struct uart_amba_port *uap)
1662{
Russell King9f25bc52015-11-03 14:51:13 +00001663 pl011_write(uap->im, uap, REG_IMSC);
Andre Przywara867b8e82015-05-21 17:26:15 +01001664
1665 return request_irq(uap->port.irq, pl011_int, 0, "uart-pl011", uap);
1666}
1667
1668/*
1669 * Enable interrupts, only timeouts when using DMA
1670 * if initial RX DMA job failed, start in interrupt mode
1671 * as well.
1672 */
1673static void pl011_enable_interrupts(struct uart_amba_port *uap)
1674{
1675 spin_lock_irq(&uap->port.lock);
1676
1677 /* Clear out any spuriously appearing RX interrupts */
Russell King9f25bc52015-11-03 14:51:13 +00001678 pl011_write(UART011_RTIS | UART011_RXIS, uap, REG_ICR);
Andre Przywara867b8e82015-05-21 17:26:15 +01001679 uap->im = UART011_RTIM;
1680 if (!pl011_dma_rx_running(uap))
1681 uap->im |= UART011_RXIM;
Russell King9f25bc52015-11-03 14:51:13 +00001682 pl011_write(uap->im, uap, REG_IMSC);
Andre Przywara867b8e82015-05-21 17:26:15 +01001683 spin_unlock_irq(&uap->port.lock);
1684}
1685
Anton Vorontsovb3564c22012-09-24 14:27:54 -07001686static int pl011_startup(struct uart_port *port)
1687{
Daniel Thompsona5820c22014-09-03 12:51:55 +01001688 struct uart_amba_port *uap =
1689 container_of(port, struct uart_amba_port, port);
Dave Martin734745c2015-03-04 12:27:33 +00001690 unsigned int cr;
Anton Vorontsovb3564c22012-09-24 14:27:54 -07001691 int retval;
1692
1693 retval = pl011_hwinit(port);
1694 if (retval)
1695 goto clk_dis;
1696
Andre Przywara867b8e82015-05-21 17:26:15 +01001697 retval = pl011_allocate_irq(uap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001698 if (retval)
1699 goto clk_dis;
1700
Russell King9f25bc52015-11-03 14:51:13 +00001701 pl011_write(uap->vendor->ifls, uap, REG_IFLS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702
Jon Medhurstfe433902013-12-10 10:18:58 +00001703 spin_lock_irq(&uap->port.lock);
1704
Shreshtha Kumar Sahud8d8ffa2012-01-18 15:53:59 +05301705 /* restore RTS and DTR */
1706 cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
1707 cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
Russell King9f25bc52015-11-03 14:51:13 +00001708 pl011_write(cr, uap, REG_CR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
Jon Medhurstfe433902013-12-10 10:18:58 +00001710 spin_unlock_irq(&uap->port.lock);
1711
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 /*
1713 * initialise the old status of the modem signals
1714 */
Russell King9f25bc52015-11-03 14:51:13 +00001715 uap->old_status = pl011_read(uap, REG_FR) & UART01x_FR_MODEM_ANY;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716
Russell King68b65f72010-12-22 17:24:39 +00001717 /* Startup DMA */
1718 pl011_dma_startup(uap);
1719
Andre Przywara867b8e82015-05-21 17:26:15 +01001720 pl011_enable_interrupts(uap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
1722 return 0;
1723
1724 clk_dis:
Julia Lawall1c4c4392012-08-26 18:01:01 +02001725 clk_disable_unprepare(uap->clk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 return retval;
1727}
1728
Andre Przywara0dd1e242015-05-21 17:26:23 +01001729static int sbsa_uart_startup(struct uart_port *port)
1730{
1731 struct uart_amba_port *uap =
1732 container_of(port, struct uart_amba_port, port);
1733 int retval;
1734
1735 retval = pl011_hwinit(port);
1736 if (retval)
1737 return retval;
1738
1739 retval = pl011_allocate_irq(uap);
1740 if (retval)
1741 return retval;
1742
1743 /* The SBSA UART does not support any modem status lines. */
1744 uap->old_status = 0;
1745
1746 pl011_enable_interrupts(uap);
1747
1748 return 0;
1749}
1750
Linus Walleijec489aa2010-06-02 08:13:52 +01001751static void pl011_shutdown_channel(struct uart_amba_port *uap,
1752 unsigned int lcrh)
1753{
Greg Kroah-Hartmanf11c9842015-09-04 09:13:39 -07001754 unsigned long val;
Linus Walleijec489aa2010-06-02 08:13:52 +01001755
Russell Kingb2a4e242015-11-03 14:51:03 +00001756 val = pl011_read(uap, lcrh);
Greg Kroah-Hartmanf11c9842015-09-04 09:13:39 -07001757 val &= ~(UART01x_LCRH_BRK | UART01x_LCRH_FEN);
Russell Kingb2a4e242015-11-03 14:51:03 +00001758 pl011_write(val, uap, lcrh);
Linus Walleijec489aa2010-06-02 08:13:52 +01001759}
1760
/*
 * disable the port. It should not disable RTS and DTR.
 * Also RTS and DTR state should be preserved to restore
 * it during startup().
 */
static void pl011_disable_uart(struct uart_amba_port *uap)
{
	unsigned int cr;

	uap->autorts = false;
	spin_lock_irq(&uap->port.lock);
	/* Save the full CR so startup() can restore RTS/DTR from old_cr. */
	cr = pl011_read(uap, REG_CR);
	uap->old_cr = cr;
	/* Keep only RTS/DTR asserted; leave the UART and TX enabled. */
	cr &= UART011_CR_RTS | UART011_CR_DTR;
	cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
	pl011_write(cr, uap, REG_CR);
	spin_unlock_irq(&uap->port.lock);

	/*
	 * disable break condition and fifos
	 */
	pl011_shutdown_channel(uap, REG_LCRH_RX);
	/* Variants with split LCRH need the TX copy cleared as well. */
	if (pl011_split_lcrh(uap))
		pl011_shutdown_channel(uap, REG_LCRH_TX);
}
1786
1787static void pl011_disable_interrupts(struct uart_amba_port *uap)
1788{
1789 spin_lock_irq(&uap->port.lock);
1790
1791 /* mask all interrupts and clear all pending ones */
1792 uap->im = 0;
Russell King9f25bc52015-11-03 14:51:13 +00001793 pl011_write(uap->im, uap, REG_IMSC);
1794 pl011_write(0xffff, uap, REG_ICR);
Andre Przywara95166a32015-05-21 17:26:16 +01001795
1796 spin_unlock_irq(&uap->port.lock);
1797}
1798
/*
 * Close the port: mask interrupts, tear down DMA, release the IRQ,
 * quiesce the UART (preserving RTS/DTR in old_cr), then stop the clock
 * and let the pins sleep.  The order matters: interrupts and DMA must
 * be stopped before the IRQ is freed and the UART disabled.
 */
static void pl011_shutdown(struct uart_port *port)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);

	pl011_disable_interrupts(uap);

	pl011_dma_shutdown(uap);

	free_irq(uap->port.irq, uap);

	pl011_disable_uart(uap);

	/*
	 * Shut down the clock producer
	 */
	clk_disable_unprepare(uap->clk);
	/* Optionally let pins go into sleep states */
	pinctrl_pm_select_sleep_state(port->dev);

	/* Give the platform a chance to run its own exit hook. */
	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->exit)
			plat->exit();
	}

	if (uap->port.ops->flush_buffer)
		uap->port.ops->flush_buffer(port);
}
1830
Andre Przywara0dd1e242015-05-21 17:26:23 +01001831static void sbsa_uart_shutdown(struct uart_port *port)
1832{
1833 struct uart_amba_port *uap =
1834 container_of(port, struct uart_amba_port, port);
1835
1836 pl011_disable_interrupts(uap);
1837
1838 free_irq(uap->port.irq, uap);
1839
1840 if (uap->port.ops->flush_buffer)
1841 uap->port.ops->flush_buffer(port);
1842}
1843
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844static void
Andre Przywaraef5a9352015-05-21 17:26:17 +01001845pl011_setup_status_masks(struct uart_port *port, struct ktermios *termios)
1846{
1847 port->read_status_mask = UART011_DR_OE | 255;
1848 if (termios->c_iflag & INPCK)
1849 port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
1850 if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
1851 port->read_status_mask |= UART011_DR_BE;
1852
1853 /*
1854 * Characters to ignore
1855 */
1856 port->ignore_status_mask = 0;
1857 if (termios->c_iflag & IGNPAR)
1858 port->ignore_status_mask |= UART011_DR_FE | UART011_DR_PE;
1859 if (termios->c_iflag & IGNBRK) {
1860 port->ignore_status_mask |= UART011_DR_BE;
1861 /*
1862 * If we're ignoring parity and break indicators,
1863 * ignore overruns too (for real raw support).
1864 */
1865 if (termios->c_iflag & IGNPAR)
1866 port->ignore_status_mask |= UART011_DR_OE;
1867 }
1868
1869 /*
1870 * Ignore all characters if CREAD is not set.
1871 */
1872 if ((termios->c_cflag & CREAD) == 0)
1873 port->ignore_status_mask |= UART_DUMMY_DR_RX;
1874}
1875
/*
 * Configure line settings (baud, word size, parity, stop bits, flow
 * control) from the termios.  Register write order is mandated by the
 * hardware: CR is cleared first, FBRD/IBRD are written before LCRH, and
 * CR is restored last.
 */
static void
pl011_set_termios(struct uart_port *port, struct ktermios *termios,
		  struct ktermios *old)
{
	struct uart_amba_port *uap =
	    container_of(port, struct uart_amba_port, port);
	unsigned int lcr_h, old_cr;
	unsigned long flags;
	unsigned int baud, quot, clkdiv;

	/* ST oversampling variants sample at 8x instead of 16x. */
	if (uap->vendor->oversampling)
		clkdiv = 8;
	else
		clkdiv = 16;

	/*
	 * Ask the core to calculate the divisor for us.
	 */
	baud = uart_get_baud_rate(port, termios, old, 0,
				  port->uartclk / clkdiv);
#ifdef CONFIG_DMA_ENGINE
	/*
	 * Adjust RX DMA polling rate with baud rate if not specified.
	 */
	if (uap->dmarx.auto_poll_rate)
		uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
#endif

	/*
	 * quot is the divisor in units of 1/64 (6 fractional bits);
	 * the 8x path is used when the rate needs oversampling.
	 */
	if (baud > port->uartclk/16)
		quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk * 4, baud);

	switch (termios->c_cflag & CSIZE) {
	case CS5:
		lcr_h = UART01x_LCRH_WLEN_5;
		break;
	case CS6:
		lcr_h = UART01x_LCRH_WLEN_6;
		break;
	case CS7:
		lcr_h = UART01x_LCRH_WLEN_7;
		break;
	default: /* CS8 */
		lcr_h = UART01x_LCRH_WLEN_8;
		break;
	}
	if (termios->c_cflag & CSTOPB)
		lcr_h |= UART01x_LCRH_STP2;
	if (termios->c_cflag & PARENB) {
		lcr_h |= UART01x_LCRH_PEN;
		if (!(termios->c_cflag & PARODD))
			lcr_h |= UART01x_LCRH_EPS;
	}
	/* Only enable the FIFOs when the hardware actually has them. */
	if (uap->fifosize > 1)
		lcr_h |= UART01x_LCRH_FEN;

	spin_lock_irqsave(&port->lock, flags);

	/*
	 * Update the per-port timeout.
	 */
	uart_update_timeout(port, termios->c_cflag, baud);

	pl011_setup_status_masks(port, termios);

	if (UART_ENABLE_MS(port, termios->c_cflag))
		pl011_enable_ms(port);

	/* first, disable everything */
	old_cr = pl011_read(uap, REG_CR);
	pl011_write(0, uap, REG_CR);

	if (termios->c_cflag & CRTSCTS) {
		if (old_cr & UART011_CR_RTS)
			old_cr |= UART011_CR_RTSEN;

		old_cr |= UART011_CR_CTSEN;
		uap->autorts = true;
	} else {
		old_cr &= ~(UART011_CR_CTSEN | UART011_CR_RTSEN);
		uap->autorts = false;
	}

	if (uap->vendor->oversampling) {
		if (baud > port->uartclk / 16)
			old_cr |= ST_UART011_CR_OVSFACT;
		else
			old_cr &= ~ST_UART011_CR_OVSFACT;
	}

	/*
	 * Workaround for the ST Micro oversampling variants to
	 * increase the bitrate slightly, by lowering the divisor,
	 * to avoid delayed sampling of start bit at high speeds,
	 * else we see data corruption.
	 */
	if (uap->vendor->oversampling) {
		if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
			quot -= 1;
		else if ((baud > 3250000) && (quot > 2))
			quot -= 2;
	}
	/* Set baud rate: low 6 bits are the fraction, the rest the integer. */
	pl011_write(quot & 0x3f, uap, REG_FBRD);
	pl011_write(quot >> 6, uap, REG_IBRD);

	/*
	 * ----------v----------v----------v----------v-----
	 * NOTE: REG_LCRH_TX and REG_LCRH_RX MUST BE WRITTEN AFTER
	 * REG_FBRD & REG_IBRD.
	 * ----------^----------^----------^----------^-----
	 */
	pl011_write_lcr_h(uap, lcr_h);
	pl011_write(old_cr, uap, REG_CR);

	spin_unlock_irqrestore(&port->lock, flags);
}
1994
Andre Przywara0dd1e242015-05-21 17:26:23 +01001995static void
1996sbsa_uart_set_termios(struct uart_port *port, struct ktermios *termios,
1997 struct ktermios *old)
1998{
1999 struct uart_amba_port *uap =
2000 container_of(port, struct uart_amba_port, port);
2001 unsigned long flags;
2002
2003 tty_termios_encode_baud_rate(termios, uap->fixed_baud, uap->fixed_baud);
2004
2005 /* The SBSA UART only supports 8n1 without hardware flow control. */
2006 termios->c_cflag &= ~(CSIZE | CSTOPB | PARENB | PARODD);
2007 termios->c_cflag &= ~(CMSPAR | CRTSCTS);
2008 termios->c_cflag |= CS8 | CLOCAL;
2009
2010 spin_lock_irqsave(&port->lock, flags);
2011 uart_update_timeout(port, CS8, uap->fixed_baud);
2012 pl011_setup_status_masks(port, termios);
2013 spin_unlock_irqrestore(&port->lock, flags);
2014}
2015
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016static const char *pl011_type(struct uart_port *port)
2017{
Daniel Thompsona5820c22014-09-03 12:51:55 +01002018 struct uart_amba_port *uap =
2019 container_of(port, struct uart_amba_port, port);
Russell Kinge8a7ba82010-12-28 09:16:54 +00002020 return uap->port.type == PORT_AMBA ? uap->type : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021}
2022
2023/*
2024 * Release the memory region(s) being used by 'port'
2025 */
Linus Walleije643f872012-06-17 15:44:19 +02002026static void pl011_release_port(struct uart_port *port)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027{
2028 release_mem_region(port->mapbase, SZ_4K);
2029}
2030
2031/*
2032 * Request the memory region(s) being used by 'port'
2033 */
Linus Walleije643f872012-06-17 15:44:19 +02002034static int pl011_request_port(struct uart_port *port)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035{
2036 return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
2037 != NULL ? 0 : -EBUSY;
2038}
2039
2040/*
2041 * Configure/autoconfigure the port.
2042 */
Linus Walleije643f872012-06-17 15:44:19 +02002043static void pl011_config_port(struct uart_port *port, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044{
2045 if (flags & UART_CONFIG_TYPE) {
2046 port->type = PORT_AMBA;
Linus Walleije643f872012-06-17 15:44:19 +02002047 pl011_request_port(port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048 }
2049}
2050
2051/*
2052 * verify the new serial_struct (for TIOCSSERIAL).
2053 */
Linus Walleije643f872012-06-17 15:44:19 +02002054static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055{
2056 int ret = 0;
2057 if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
2058 ret = -EINVAL;
Yinghai Lua62c4132008-08-19 20:49:55 -07002059 if (ser->irq < 0 || ser->irq >= nr_irqs)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 ret = -EINVAL;
2061 if (ser->baud_base < 9600)
2062 ret = -EINVAL;
2063 return ret;
2064}
2065
/* uart_ops for regular (AMBA-probed) PL011 ports. */
static struct uart_ops amba_pl011_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= pl011_set_mctrl,
	.get_mctrl	= pl011_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.enable_ms	= pl011_enable_ms,
	.break_ctl	= pl011_break_ctl,
	.startup	= pl011_startup,
	.shutdown	= pl011_shutdown,
	.flush_buffer	= pl011_dma_flush_buffer,
	.set_termios	= pl011_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};
2090
/* The SBSA UART has no modem control outputs; setting lines is a no-op. */
static void sbsa_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}
2094
/* The SBSA UART has no modem status inputs; report none asserted. */
static unsigned int sbsa_uart_get_mctrl(struct uart_port *port)
{
	return 0;
}
2099
/* uart_ops for the reduced SBSA ("Generic UART") variant. */
static const struct uart_ops sbsa_uart_pops = {
	.tx_empty	= pl011_tx_empty,
	.set_mctrl	= sbsa_uart_set_mctrl,
	.get_mctrl	= sbsa_uart_get_mctrl,
	.stop_tx	= pl011_stop_tx,
	.start_tx	= pl011_start_tx,
	.stop_rx	= pl011_stop_rx,
	.startup	= sbsa_uart_startup,
	.shutdown	= sbsa_uart_shutdown,
	.set_termios	= sbsa_uart_set_termios,
	.type		= pl011_type,
	.release_port	= pl011_release_port,
	.request_port	= pl011_request_port,
	.config_port	= pl011_config_port,
	.verify_port	= pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= pl011_hwinit,
	.poll_get_char	= pl011_get_poll_char,
	.poll_put_char	= pl011_put_poll_char,
#endif
};
2121
/* Registered ports; the console code looks them up here by index. */
static struct uart_amba_port *amba_ports[UART_NR];
2123
2124#ifdef CONFIG_SERIAL_AMBA_PL011_CONSOLE
2125
Russell Kingd3587882006-03-20 20:00:09 +00002126static void pl011_console_putchar(struct uart_port *port, int ch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127{
Daniel Thompsona5820c22014-09-03 12:51:55 +01002128 struct uart_amba_port *uap =
2129 container_of(port, struct uart_amba_port, port);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130
Russell King9f25bc52015-11-03 14:51:13 +00002131 while (pl011_read(uap, REG_FR) & UART01x_FR_TXFF)
Russell Kingd3587882006-03-20 20:00:09 +00002132 barrier();
Russell King9f25bc52015-11-03 14:51:13 +00002133 pl011_write(ch, uap, REG_DR);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134}
2135
/*
 * Write a console message.  Takes the port lock unless sysrq is in
 * flight (lock already held by us) or an oops is in progress (best
 * effort via trylock).  On variants that are not always enabled the
 * UART is forced on with CTS flow control off for the duration, and
 * the original CR is restored once the transmitter has drained.
 */
static void
pl011_console_write(struct console *co, const char *s, unsigned int count)
{
	struct uart_amba_port *uap = amba_ports[co->index];
	unsigned int status, old_cr = 0, new_cr;
	unsigned long flags;
	int locked = 1;

	clk_enable(uap->clk);

	local_irq_save(flags);
	if (uap->port.sysrq)
		locked = 0;
	else if (oops_in_progress)
		locked = spin_trylock(&uap->port.lock);
	else
		spin_lock(&uap->port.lock);

	/*
	 * First save the CR then disable the interrupts
	 */
	if (!uap->vendor->always_enabled) {
		old_cr = pl011_read(uap, REG_CR);
		new_cr = old_cr & ~UART011_CR_CTSEN;
		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
		pl011_write(new_cr, uap, REG_CR);
	}

	uart_console_write(&uap->port, s, count, pl011_console_putchar);

	/*
	 * Finally, wait for transmitter to become empty
	 * and restore the TCR
	 */
	do {
		status = pl011_read(uap, REG_FR);
	} while (status & UART01x_FR_BUSY);
	if (!uap->vendor->always_enabled)
		pl011_write(old_cr, uap, REG_CR);

	if (locked)
		spin_unlock(&uap->port.lock);
	local_irq_restore(flags);

	clk_disable(uap->clk);
}
2182
/*
 * If the boot loader left the UART enabled, decode the current line
 * settings (parity, word length, baud) from the hardware registers so
 * the console can continue with them.
 */
static void __init
pl011_console_get_options(struct uart_amba_port *uap, int *baud,
			  int *parity, int *bits)
{
	if (pl011_read(uap, REG_CR) & UART01x_CR_UARTEN) {
		unsigned int lcr_h, ibrd, fbrd;

		lcr_h = pl011_read(uap, REG_LCRH_TX);

		*parity = 'n';
		if (lcr_h & UART01x_LCRH_PEN) {
			if (lcr_h & UART01x_LCRH_EPS)
				*parity = 'e';
			else
				*parity = 'o';
		}

		/* Only 7- and 8-bit words are reported here. */
		if ((lcr_h & 0x60) == UART01x_LCRH_WLEN_7)
			*bits = 7;
		else
			*bits = 8;

		ibrd = pl011_read(uap, REG_IBRD);
		fbrd = pl011_read(uap, REG_FBRD);

		/* Invert the divisor: rate = clk / (16 * (ibrd + fbrd/64)). */
		*baud = uap->port.uartclk * 4 / (64 * ibrd + fbrd);

		if (uap->vendor->oversampling) {
			/* 8x oversampling doubles the effective rate. */
			if (pl011_read(uap, REG_CR)
			    & ST_UART011_CR_OVSFACT)
				*baud *= 2;
		}
	}
}
2217
/*
 * Console setup callback: pick the port, prepare its clock and pins,
 * then determine baud/parity/bits from (in order of preference) the
 * vendor's fixed options, the kernel command line, or the values the
 * boot loader left in the hardware.
 */
static int __init pl011_console_setup(struct console *co, char *options)
{
	struct uart_amba_port *uap;
	int baud = 38400;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	/*
	 * Check whether an invalid uart number has been specified, and
	 * if so, search for the first available port that does have
	 * console support.
	 */
	if (co->index >= UART_NR)
		co->index = 0;
	uap = amba_ports[co->index];
	if (!uap)
		return -ENODEV;

	/* Allow pins to be muxed in and configured */
	pinctrl_pm_select_default_state(uap->port.dev);

	ret = clk_prepare(uap->clk);
	if (ret)
		return ret;

	/* Give the platform a chance to run its own init hook. */
	if (dev_get_platdata(uap->port.dev)) {
		struct amba_pl011_data *plat;

		plat = dev_get_platdata(uap->port.dev);
		if (plat->init)
			plat->init();
	}

	uap->port.uartclk = clk_get_rate(uap->clk);

	if (uap->vendor->fixed_options) {
		baud = uap->fixed_baud;
	} else {
		if (options)
			uart_parse_options(options,
					   &baud, &parity, &bits, &flow);
		else
			pl011_console_get_options(uap, &baud, &parity, &bits);
	}

	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
}
2267
Vincent Sanders2d934862005-09-14 22:36:03 +01002268static struct uart_driver amba_reg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269static struct console amba_console = {
2270 .name = "ttyAMA",
2271 .write = pl011_console_write,
2272 .device = uart_console_device,
2273 .setup = pl011_console_setup,
2274 .flags = CON_PRINTBUFFER,
2275 .index = -1,
2276 .data = &amba_reg,
2277};
2278
2279#define AMBA_CONSOLE (&amba_console)
Rob Herring0d3c6732014-04-18 17:19:57 -05002280
2281static void pl011_putc(struct uart_port *port, int c)
2282{
Russell King9f25bc52015-11-03 14:51:13 +00002283 while (readl(port->membase + REG_FR) & UART01x_FR_TXFF)
Rob Herring0d3c6732014-04-18 17:19:57 -05002284 ;
Russell King9f25bc52015-11-03 14:51:13 +00002285 writeb(c, port->membase + REG_DR);
2286 while (readl(port->membase + REG_FR) & UART01x_FR_BUSY)
Rob Herring0d3c6732014-04-18 17:19:57 -05002287 ;
2288}
2289
2290static void pl011_early_write(struct console *con, const char *s, unsigned n)
2291{
2292 struct earlycon_device *dev = con->data;
2293
2294 uart_console_write(&dev->port, s, n, pl011_putc);
2295}
2296
2297static int __init pl011_early_console_setup(struct earlycon_device *device,
2298 const char *opt)
2299{
2300 if (!device->port.membase)
2301 return -ENODEV;
2302
2303 device->con->write = pl011_early_write;
2304 return 0;
2305}
2306EARLYCON_DECLARE(pl011, pl011_early_console_setup);
Rob Herring45e0f0f2014-03-27 08:08:03 -05002307OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
Rob Herring0d3c6732014-04-18 17:19:57 -05002308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309#else
2310#define AMBA_CONSOLE NULL
2311#endif
2312
/*
 * Serial-core driver shared by AMBA-probed and SBSA platform ports;
 * registered lazily on first port add (see pl011_register_port()).
 */
static struct uart_driver amba_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= "ttyAMA",
	.dev_name		= "ttyAMA",
	.major			= SERIAL_AMBA_MAJOR,
	.minor			= SERIAL_AMBA_MINOR,
	.nr			= UART_NR,
	.cons			= AMBA_CONSOLE,
};
2322
Matthew Leach32614aa2012-08-28 16:41:28 +01002323static int pl011_probe_dt_alias(int index, struct device *dev)
2324{
2325 struct device_node *np;
2326 static bool seen_dev_with_alias = false;
2327 static bool seen_dev_without_alias = false;
2328 int ret = index;
2329
2330 if (!IS_ENABLED(CONFIG_OF))
2331 return ret;
2332
2333 np = dev->of_node;
2334 if (!np)
2335 return ret;
2336
2337 ret = of_alias_get_id(np, "serial");
2338 if (IS_ERR_VALUE(ret)) {
2339 seen_dev_without_alias = true;
2340 ret = index;
2341 } else {
2342 seen_dev_with_alias = true;
2343 if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
2344 dev_warn(dev, "requested serial port %d not available.\n", ret);
2345 ret = index;
2346 }
2347 }
2348
2349 if (seen_dev_with_alias && seen_dev_without_alias)
2350 dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
2351
2352 return ret;
2353}
2354
Andre Przywara49bb3c82015-05-21 17:26:14 +01002355/* unregisters the driver also if no more ports are left */
2356static void pl011_unregister_port(struct uart_amba_port *uap)
2357{
2358 int i;
2359 bool busy = false;
2360
2361 for (i = 0; i < ARRAY_SIZE(amba_ports); i++) {
2362 if (amba_ports[i] == uap)
2363 amba_ports[i] = NULL;
2364 else if (amba_ports[i])
2365 busy = true;
2366 }
2367 pl011_dma_remove(uap);
2368 if (!busy)
2369 uart_unregister_driver(&amba_reg);
2370}
2371
Andre Przywara3873e2d2015-05-21 17:26:18 +01002372static int pl011_find_free_port(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373{
Andre Przywara3873e2d2015-05-21 17:26:18 +01002374 int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375
2376 for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
2377 if (amba_ports[i] == NULL)
Andre Przywara3873e2d2015-05-21 17:26:18 +01002378 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379
Andre Przywara3873e2d2015-05-21 17:26:18 +01002380 return -EBUSY;
2381}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002382
Andre Przywara3873e2d2015-05-21 17:26:18 +01002383static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
2384 struct resource *mmiobase, int index)
2385{
2386 void __iomem *base;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387
Andre Przywara3873e2d2015-05-21 17:26:18 +01002388 base = devm_ioremap_resource(dev, mmiobase);
Krzysztof Kozlowski97a60ea2015-07-09 22:21:41 +09002389 if (IS_ERR(base))
2390 return PTR_ERR(base);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391
Andre Przywara3873e2d2015-05-21 17:26:18 +01002392 index = pl011_probe_dt_alias(index, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393
Shreshtha Kumar Sahud8d8ffa2012-01-18 15:53:59 +05302394 uap->old_cr = 0;
Andre Przywara3873e2d2015-05-21 17:26:18 +01002395 uap->port.dev = dev;
2396 uap->port.mapbase = mmiobase->start;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397 uap->port.membase = base;
2398 uap->port.iotype = UPIO_MEM;
Russell Kingffca2b12010-12-22 17:13:05 +00002399 uap->port.fifosize = uap->fifosize;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400 uap->port.flags = UPF_BOOT_AUTOCONF;
Andre Przywara3873e2d2015-05-21 17:26:18 +01002401 uap->port.line = index;
2402
2403 amba_ports[index] = uap;
2404
2405 return 0;
2406}
2407
2408static int pl011_register_port(struct uart_amba_port *uap)
2409{
2410 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411
Linus Walleijc3d8b762012-03-21 20:15:18 +01002412 /* Ensure interrupts from this UART are masked and cleared */
Russell King9f25bc52015-11-03 14:51:13 +00002413 pl011_write(0, uap, REG_IMSC);
2414 pl011_write(0xffff, uap, REG_ICR);
Linus Walleijc3d8b762012-03-21 20:15:18 +01002415
Tushar Beheraef2889f2014-01-20 14:32:35 +05302416 if (!amba_reg.state) {
2417 ret = uart_register_driver(&amba_reg);
2418 if (ret < 0) {
Andre Przywara3873e2d2015-05-21 17:26:18 +01002419 dev_err(uap->port.dev,
Jorge Ramirez-Ortiz1c9be312015-03-06 13:05:40 -05002420 "Failed to register AMBA-PL011 driver\n");
Tushar Beheraef2889f2014-01-20 14:32:35 +05302421 return ret;
2422 }
2423 }
2424
Linus Torvalds1da177e2005-04-16 15:20:36 -07002425 ret = uart_add_one_port(&amba_reg, &uap->port);
Andre Przywara49bb3c82015-05-21 17:26:14 +01002426 if (ret)
2427 pl011_unregister_port(uap);
Tushar Behera7f6d9422014-06-26 15:35:35 +05302428
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 return ret;
2430}
2431
Andre Przywara3873e2d2015-05-21 17:26:18 +01002432static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
2433{
2434 struct uart_amba_port *uap;
2435 struct vendor_data *vendor = id->data;
2436 int portnr, ret;
2437
2438 portnr = pl011_find_free_port();
2439 if (portnr < 0)
2440 return portnr;
2441
2442 uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
2443 GFP_KERNEL);
2444 if (!uap)
2445 return -ENOMEM;
2446
2447 uap->clk = devm_clk_get(&dev->dev, NULL);
2448 if (IS_ERR(uap->clk))
2449 return PTR_ERR(uap->clk);
2450
Russell King439403b2015-11-16 17:40:31 +00002451 uap->reg_offset = vendor->reg_offset;
Andre Przywara3873e2d2015-05-21 17:26:18 +01002452 uap->vendor = vendor;
Andre Przywara3873e2d2015-05-21 17:26:18 +01002453 uap->fifosize = vendor->get_fifosize(dev);
2454 uap->port.irq = dev->irq[0];
2455 uap->port.ops = &amba_pl011_pops;
2456
2457 snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
2458
2459 ret = pl011_setup_port(&dev->dev, uap, &dev->res, portnr);
2460 if (ret)
2461 return ret;
2462
2463 amba_set_drvdata(dev, uap);
2464
2465 return pl011_register_port(uap);
2466}
2467
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468static int pl011_remove(struct amba_device *dev)
2469{
2470 struct uart_amba_port *uap = amba_get_drvdata(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 uart_remove_one_port(&amba_reg, &uap->port);
Andre Przywara49bb3c82015-05-21 17:26:14 +01002473 pl011_unregister_port(uap);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 return 0;
2475}
2476
Ulf Hanssond0ce8502013-12-03 11:04:28 +01002477#ifdef CONFIG_PM_SLEEP
2478static int pl011_suspend(struct device *dev)
Leo Chenb736b892009-07-28 23:43:33 +01002479{
Ulf Hanssond0ce8502013-12-03 11:04:28 +01002480 struct uart_amba_port *uap = dev_get_drvdata(dev);
Leo Chenb736b892009-07-28 23:43:33 +01002481
2482 if (!uap)
2483 return -EINVAL;
2484
2485 return uart_suspend_port(&amba_reg, &uap->port);
2486}
2487
Ulf Hanssond0ce8502013-12-03 11:04:28 +01002488static int pl011_resume(struct device *dev)
Leo Chenb736b892009-07-28 23:43:33 +01002489{
Ulf Hanssond0ce8502013-12-03 11:04:28 +01002490 struct uart_amba_port *uap = dev_get_drvdata(dev);
Leo Chenb736b892009-07-28 23:43:33 +01002491
2492 if (!uap)
2493 return -EINVAL;
2494
2495 return uart_resume_port(&amba_reg, &uap->port);
2496}
2497#endif
2498
Ulf Hanssond0ce8502013-12-03 11:04:28 +01002499static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
2500
Andre Przywara0dd1e242015-05-21 17:26:23 +01002501static int sbsa_uart_probe(struct platform_device *pdev)
2502{
2503 struct uart_amba_port *uap;
2504 struct resource *r;
2505 int portnr, ret;
2506 int baudrate;
2507
2508 /*
2509 * Check the mandatory baud rate parameter in the DT node early
2510 * so that we can easily exit with the error.
2511 */
2512 if (pdev->dev.of_node) {
2513 struct device_node *np = pdev->dev.of_node;
2514
2515 ret = of_property_read_u32(np, "current-speed", &baudrate);
2516 if (ret)
2517 return ret;
2518 } else {
2519 baudrate = 115200;
2520 }
2521
2522 portnr = pl011_find_free_port();
2523 if (portnr < 0)
2524 return portnr;
2525
2526 uap = devm_kzalloc(&pdev->dev, sizeof(struct uart_amba_port),
2527 GFP_KERNEL);
2528 if (!uap)
2529 return -ENOMEM;
2530
Russell King439403b2015-11-16 17:40:31 +00002531 uap->reg_offset = vendor_sbsa.reg_offset;
Andre Przywara0dd1e242015-05-21 17:26:23 +01002532 uap->vendor = &vendor_sbsa;
2533 uap->fifosize = 32;
2534 uap->port.irq = platform_get_irq(pdev, 0);
2535 uap->port.ops = &sbsa_uart_pops;
2536 uap->fixed_baud = baudrate;
2537
2538 snprintf(uap->type, sizeof(uap->type), "SBSA");
2539
2540 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2541
2542 ret = pl011_setup_port(&pdev->dev, uap, r, portnr);
2543 if (ret)
2544 return ret;
2545
2546 platform_set_drvdata(pdev, uap);
2547
2548 return pl011_register_port(uap);
2549}
2550
/* Platform remove: detach the SBSA port and release its slot. */
static int sbsa_uart_remove(struct platform_device *pdev)
{
	struct uart_amba_port *uap = platform_get_drvdata(pdev);

	uart_remove_one_port(&amba_reg, &uap->port);
	pl011_unregister_port(uap);
	return 0;
}
2559
/* Devicetree match table for SBSA "generic UART" platform devices. */
static const struct of_device_id sbsa_uart_of_match[] = {
	{ .compatible = "arm,sbsa-uart", },
	{},
};
MODULE_DEVICE_TABLE(of, sbsa_uart_of_match);
2565
Graeme Gregory3db9ab02015-05-21 17:26:24 +01002566static const struct acpi_device_id sbsa_uart_acpi_match[] = {
2567 { "ARMH0011", 0 },
2568 {},
2569};
2570MODULE_DEVICE_TABLE(acpi, sbsa_uart_acpi_match);
2571
Andre Przywara0dd1e242015-05-21 17:26:23 +01002572static struct platform_driver arm_sbsa_uart_platform_driver = {
2573 .probe = sbsa_uart_probe,
2574 .remove = sbsa_uart_remove,
2575 .driver = {
2576 .name = "sbsa-uart",
2577 .of_match_table = of_match_ptr(sbsa_uart_of_match),
Graeme Gregory3db9ab02015-05-21 17:26:24 +01002578 .acpi_match_table = ACPI_PTR(sbsa_uart_acpi_match),
Andre Przywara0dd1e242015-05-21 17:26:23 +01002579 },
2580};
2581
Russell King2c39c9e2010-07-27 08:50:16 +01002582static struct amba_id pl011_ids[] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 {
2584 .id = 0x00041011,
2585 .mask = 0x000fffff,
Alessandro Rubini5926a292009-06-04 17:43:04 +01002586 .data = &vendor_arm,
2587 },
2588 {
2589 .id = 0x00380802,
2590 .mask = 0x00ffffff,
2591 .data = &vendor_st,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 },
2593 { 0, 0 },
2594};
2595
Dave Martin60f7a332011-10-05 15:15:22 +01002596MODULE_DEVICE_TABLE(amba, pl011_ids);
2597
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598static struct amba_driver pl011_driver = {
2599 .drv = {
2600 .name = "uart-pl011",
Ulf Hanssond0ce8502013-12-03 11:04:28 +01002601 .pm = &pl011_dev_pm_ops,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602 },
2603 .id_table = pl011_ids,
2604 .probe = pl011_probe,
2605 .remove = pl011_remove,
2606};
2607
2608static int __init pl011_init(void)
2609{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002610 printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
2611
Andre Przywara0dd1e242015-05-21 17:26:23 +01002612 if (platform_driver_register(&arm_sbsa_uart_platform_driver))
2613 pr_warn("could not register SBSA UART platform driver\n");
Greg Kroah-Hartman062a68a2015-09-04 09:11:24 -07002614 return amba_driver_register(&pl011_driver);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615}
2616
/* Module unload: tear down both bus bindings registered in pl011_init(). */
static void __exit pl011_exit(void)
{
	platform_driver_unregister(&arm_sbsa_uart_platform_driver);
	amba_driver_unregister(&pl011_driver);
}
2622
Alessandro Rubini4dd9e742009-05-05 05:54:13 +01002623/*
2624 * While this can be a module, if builtin it's most likely the console
2625 * So let's leave module_exit but move module_init to an earlier place
2626 */
2627arch_initcall(pl011_init);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628module_exit(pl011_exit);
2629
2630MODULE_AUTHOR("ARM Ltd/Deep Blue Solutions Ltd");
2631MODULE_DESCRIPTION("ARM AMBA serial port driver");
2632MODULE_LICENSE("GPL");