/* drivers/serial/msm_serial_hs.c
 *
 * MSM 7k High speed uart driver
 *
 * Copyright (c) 2008 Google Inc.
 * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
 * Modified: Nick Pelly <npelly@google.com>
 *
 * All source code in this file is licensed under the following license
 * except where indicated.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * Has optional support for uart power management independent of linux
 * suspend/resume:
 *
 * RX wakeup.
 * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
 * UART RX pin). This should only be used if there is not a wakeup
 * GPIO on the UART CTS, and the first RX byte is known (for example, with the
 * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
 * always be lost. RTS will be asserted even while the UART is off in this mode
 * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
 */

#include <linux/module.h>

#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/tty_flip.h>
#include <linux/wait.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <asm/atomic.h>
#include <asm/irq.h>

#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/sps.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_bus.h>

#include "msm_serial_hs_hwreg.h"
#define UART_SPS_CONS_PERIPHERAL 0
#define UART_SPS_PROD_PERIPHERAL 1

static int hs_serial_debug_mask = 1;
module_param_named(debug_mask, hs_serial_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
/*
 * There are 3 different kinds of UART core available on MSM:
 * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
 * and BLSP based HSUART.
 */
enum uart_core_type {
	LEGACY_HSUART,
	GSBI_HSUART,
	BLSP_HSUART,
};

enum flush_reason {
	FLUSH_NONE,
	FLUSH_DATA_READY,
	FLUSH_DATA_INVALID, /* values after this indicate invalid data */
	FLUSH_IGNORE = FLUSH_DATA_INVALID,
	FLUSH_STOP,
	FLUSH_SHUTDOWN,
};

enum msm_hs_clk_states_e {
	MSM_HS_CLK_PORT_OFF,     /* port not in use */
	MSM_HS_CLK_OFF,          /* clock disabled */
	MSM_HS_CLK_REQUEST_OFF,  /* disable after TX and RX flushed */
	MSM_HS_CLK_ON,           /* clock enabled */
};

/* Track the forced RXSTALE flush during clock off sequence.
 * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
enum msm_hs_clk_req_off_state_e {
	CLK_REQ_OFF_START,
	CLK_REQ_OFF_RXSTALE_ISSUED,
	CLK_REQ_OFF_FLUSH_ISSUED,
	CLK_REQ_OFF_RXSTALE_FLUSHED,
};

/* SPS data structures to support HSUART with BAM
 * @sps_pipe - This struct defines a BAM pipe descriptor
 * @sps_connect - This struct defines a connection's end point
 * @sps_register - This struct defines an event registration's parameters
 */
struct msm_hs_sps_ep_conn_data {
	struct sps_pipe *pipe_handle;
	struct sps_connect config;
	struct sps_register_event event;
};
122
Mayank Rana55046232011-03-07 10:28:42 +0530123struct msm_hs_tx {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700124 unsigned int tx_ready_int_en; /* ok to dma more tx */
125 unsigned int dma_in_flight; /* tx dma in progress */
Mayank Ranaaf2f0082012-05-22 10:16:02 +0530126 enum flush_reason flush;
127 wait_queue_head_t wait;
Mayank Rana55046232011-03-07 10:28:42 +0530128 struct msm_dmov_cmd xfer;
129 dmov_box *command_ptr;
130 u32 *command_ptr_ptr;
131 dma_addr_t mapped_cmd_ptr;
132 dma_addr_t mapped_cmd_ptr_ptr;
133 int tx_count;
134 dma_addr_t dma_base;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700135 struct tasklet_struct tlet;
Saket Saurabhcbf6c522013-01-07 16:30:37 +0530136 struct msm_hs_sps_ep_conn_data cons;
Mayank Rana55046232011-03-07 10:28:42 +0530137};
138
Mayank Rana55046232011-03-07 10:28:42 +0530139struct msm_hs_rx {
140 enum flush_reason flush;
141 struct msm_dmov_cmd xfer;
142 dma_addr_t cmdptr_dmaaddr;
143 dmov_box *command_ptr;
144 u32 *command_ptr_ptr;
145 dma_addr_t mapped_cmd_ptr;
146 wait_queue_head_t wait;
147 dma_addr_t rbuffer;
148 unsigned char *buffer;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 unsigned int buffer_pending;
Mayank Rana55046232011-03-07 10:28:42 +0530150 struct dma_pool *pool;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700151 struct wake_lock wake_lock;
152 struct delayed_work flip_insert_work;
153 struct tasklet_struct tlet;
Saket Saurabhcbf6c522013-01-07 16:30:37 +0530154 struct msm_hs_sps_ep_conn_data prod;
Mayank Rana55046232011-03-07 10:28:42 +0530155};
156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700157enum buffer_states {
158 NONE_PENDING = 0x0,
159 FIFO_OVERRUN = 0x1,
160 PARITY_ERROR = 0x2,
161 CHARS_NORMAL = 0x4,
162};
163
164/* optional low power wakeup, typically on a GPIO RX irq */
165struct msm_hs_wakeup {
Mayank Rana55046232011-03-07 10:28:42 +0530166 int irq; /* < 0 indicates low power wakeup disabled */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700167 unsigned char ignore; /* bool */
168
169 /* bool: inject char into rx tty on wakeup */
Mayank Rana55046232011-03-07 10:28:42 +0530170 unsigned char inject_rx;
171 char rx_to_inject;
172};
173
Mayank Rana55046232011-03-07 10:28:42 +0530174struct msm_hs_port {
175 struct uart_port uport;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176 unsigned long imr_reg; /* shadow value of UARTDM_IMR */
Mayank Rana55046232011-03-07 10:28:42 +0530177 struct clk *clk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 struct clk *pclk;
Mayank Rana55046232011-03-07 10:28:42 +0530179 struct msm_hs_tx tx;
180 struct msm_hs_rx rx;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700181 /* gsbi uarts have to do additional writes to gsbi memory */
182 /* block and top control status block. The following pointers */
183 /* keep a handle to these blocks. */
184 unsigned char __iomem *mapped_gsbi;
Mayank Rana55046232011-03-07 10:28:42 +0530185 int dma_tx_channel;
186 int dma_rx_channel;
187 int dma_tx_crci;
188 int dma_rx_crci;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700189 struct hrtimer clk_off_timer; /* to poll TXEMT before clock off */
Mayank Rana55046232011-03-07 10:28:42 +0530190 ktime_t clk_off_delay;
191 enum msm_hs_clk_states_e clk_state;
192 enum msm_hs_clk_req_off_state_e clk_req_off_state;
193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700194 struct msm_hs_wakeup wakeup;
195 struct wake_lock dma_wake_lock; /* held while any DMA active */
Mayank Rana17e0e1a2012-04-07 02:10:33 +0530196
197 struct dentry *loopback_dir;
Mayank Ranacb589d82012-03-01 11:50:03 +0530198 struct work_struct clock_off_w; /* work for actual clock off */
199 struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
200 struct mutex clk_mutex; /* mutex to guard against clock off/clock on */
Saket Saurabhcbf6c522013-01-07 16:30:37 +0530201 struct work_struct reset_bam_rx; /* work for reset bam rx endpoint */
202 struct work_struct disconnect_rx_endpoint; /* disconnect rx_endpoint */
Saket Saurabhce394102012-10-29 19:51:28 +0530203 bool tty_flush_receive;
Mayank Ranaff398d02012-12-18 10:22:50 +0530204 enum uart_core_type uart_type;
205 u32 bam_handle;
206 resource_size_t bam_mem;
207 int bam_irq;
208 unsigned char __iomem *bam_base;
209 unsigned int bam_tx_ep_pipe_index;
210 unsigned int bam_rx_ep_pipe_index;
Saket Saurabhcbf6c522013-01-07 16:30:37 +0530211 /* struct sps_event_notify is an argument passed when triggering a
212 * callback event object registered for an SPS connection end point.
213 */
214 struct sps_event_notify notify;
Mayank Rana88d49142013-01-16 17:28:53 +0530215 /* bus client handler */
216 u32 bus_perf_client;
217 /* BLSP UART required BUS Scaling data */
218 struct msm_bus_scale_pdata *bus_scale_table;
Mayank Rana55046232011-03-07 10:28:42 +0530219};
220
221#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
222#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
223#define UARTDM_RX_BUF_SIZE 512
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700224#define RETRY_TIMEOUT 5
Saket Saurabh51690e52012-08-17 14:17:46 +0530225#define UARTDM_NR 256
Mayank Ranaff398d02012-12-18 10:22:50 +0530226#define BAM_PIPE_MIN 0
227#define BAM_PIPE_MAX 11
Mayank Rana55046232011-03-07 10:28:42 +0530228
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700229static struct dentry *debug_base;
Mayank Rana55046232011-03-07 10:28:42 +0530230static struct msm_hs_port q_uart_port[UARTDM_NR];
231static struct platform_driver msm_serial_hs_platform_driver;
232static struct uart_driver msm_hs_driver;
233static struct uart_ops msm_hs_ops;
Saket Saurabhcbf6c522013-01-07 16:30:37 +0530234static void msm_hs_start_rx_locked(struct uart_port *uport);
235static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr);
236static void flip_insert_work(struct work_struct *work);
Mayank Rana55046232011-03-07 10:28:42 +0530237
238#define UARTDM_TO_MSM(uart_port) \
239 container_of((uart_port), struct msm_hs_port, uport)
240
static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	int state = 1;
	enum msm_hs_clk_states_e clk_state;
	unsigned long flags;

	struct platform_device *pdev = container_of(dev, struct
						    platform_device, dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	clk_state = msm_uport->clk_state;
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);

	if (clk_state <= MSM_HS_CLK_OFF)
		state = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", state);
}

static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int state;
	struct platform_device *pdev = container_of(dev, struct
						    platform_device, dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	state = buf[0] - '0';
	switch (state) {
	case 0: {
		msm_hs_request_clock_off(&msm_uport->uport);
		break;
	}
	case 1: {
		msm_hs_request_clock_on(&msm_uport->uport);
		break;
	}
	default: {
		return -EINVAL;
	}
	}
	return count;
}

static DEVICE_ATTR(clock, S_IWUSR | S_IRUGO, show_clock, set_clock);
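/*
 * Illustrative use of the "clock" attribute from user space (a sketch only;
 * the exact sysfs path depends on the platform device name and id on a given
 * board, so the path below is an assumption):
 *
 *   echo 1 > /sys/devices/platform/msm_serial_hs.0/clock    request clock on
 *   echo 0 > /sys/devices/platform/msm_serial_hs.0/clock    request clock off
 *   cat /sys/devices/platform/msm_serial_hs.0/clock         1 = on, 0 = off
 */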

static inline unsigned int use_low_power_wakeup(struct msm_hs_port *msm_uport)
{
	return (msm_uport->wakeup.irq > 0);
}

static inline int is_gsbi_uart(struct msm_hs_port *msm_uport)
{
	/* assume gsbi uart if gsbi resource found in pdata */
	return ((msm_uport->mapped_gsbi != NULL));
}

static unsigned int is_blsp_uart(struct msm_hs_port *msm_uport)
{
	return (msm_uport->uart_type == BLSP_HSUART);
}

static inline unsigned int msm_hs_read(struct uart_port *uport,
				       unsigned int offset)
{
	return readl_relaxed(uport->membase + offset);
}

static inline void msm_hs_write(struct uart_port *uport, unsigned int offset,
				unsigned int value)
{
	writel_relaxed(value, uport->membase + offset);
}

static void msm_hs_release_port(struct uart_port *port)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *gsbi_resource;
	resource_size_t size;

	if (is_gsbi_uart(msm_uport)) {
		iowrite32(GSBI_PROTOCOL_IDLE, msm_uport->mapped_gsbi +
			  GSBI_CONTROL_ADDR);
		gsbi_resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     "gsbi_resource");
		if (unlikely(!gsbi_resource))
			return;

		size = resource_size(gsbi_resource);
		release_mem_region(gsbi_resource->start, size);
		iounmap(msm_uport->mapped_gsbi);
		msm_uport->mapped_gsbi = NULL;
	}
}

static int msm_hs_request_port(struct uart_port *port)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *gsbi_resource;
	resource_size_t size;

	gsbi_resource = platform_get_resource_byname(pdev,
						     IORESOURCE_MEM,
						     "gsbi_resource");
	if (gsbi_resource) {
		size = resource_size(gsbi_resource);
		if (unlikely(!request_mem_region(gsbi_resource->start, size,
						 "msm_serial_hs")))
			return -EBUSY;
		msm_uport->mapped_gsbi = ioremap(gsbi_resource->start,
						 size);
		if (!msm_uport->mapped_gsbi) {
			release_mem_region(gsbi_resource->start, size);
			return -EBUSY;
		}
	}
	/* no gsbi uart */
	return 0;
}

static int msm_serial_loopback_enable_set(void *data, u64 val)
{
	struct msm_hs_port *msm_uport = data;
	struct uart_port *uport = &(msm_uport->uport);
	unsigned long flags;
	int ret = 0;

	clk_prepare_enable(msm_uport->clk);
	if (msm_uport->pclk)
		clk_prepare_enable(msm_uport->pclk);

	if (val) {
		spin_lock_irqsave(&uport->lock, flags);
		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
		if (is_blsp_uart(msm_uport))
			ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
				UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
		else
			ret |= UARTDM_MR2_LOOP_MODE_BMSK;
		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
		spin_unlock_irqrestore(&uport->lock, flags);
	} else {
		spin_lock_irqsave(&uport->lock, flags);
		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
		if (is_blsp_uart(msm_uport))
			ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
				 UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
		else
			ret &= ~UARTDM_MR2_LOOP_MODE_BMSK;
		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
		spin_unlock_irqrestore(&uport->lock, flags);
	}
	/* Calling CLOCK API. Hence an mb() is required here. */
	mb();
	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);

	return 0;
}

static int msm_serial_loopback_enable_get(void *data, u64 *val)
{
	struct msm_hs_port *msm_uport = data;
	struct uart_port *uport = &(msm_uport->uport);
	unsigned long flags;
	int ret = 0;

	clk_prepare_enable(msm_uport->clk);
	if (msm_uport->pclk)
		clk_prepare_enable(msm_uport->pclk);

	spin_lock_irqsave(&uport->lock, flags);
	ret = msm_hs_read(&msm_uport->uport, UARTDM_MR2_ADDR);
	spin_unlock_irqrestore(&uport->lock, flags);

	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);

	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
			msm_serial_loopback_enable_set, "%llu\n");

/*
 * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
 * Writing 1 turns on internal loopback mode in HW. Useful for automation
 * test scripts.
 * Writing 0 disables the internal loopback mode. Default is disabled.
 */
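/*
 * Example (illustrative only, assuming debugfs is mounted at
 * /sys/kernel/debug and the port id is 0):
 *
 *   echo 1 > /sys/kernel/debug/msm_serial_hs/loopback.0   enable HW loopback
 *   echo 0 > /sys/kernel/debug/msm_serial_hs/loopback.0   disable HW loopback
 *   cat /sys/kernel/debug/msm_serial_hs/loopback.0        read current state
 */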
static void __devinit msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
					      int id)
{
	char node_name[15];
	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
	msm_uport->loopback_dir = debugfs_create_file(node_name,
						      S_IRUGO | S_IWUSR,
						      debug_base,
						      msm_uport,
						      &loopback_enable_fops);

	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
		pr_err("%s(): Cannot create loopback.%d debug entry\n",
		       __func__, id);
}

static int __devexit msm_hs_remove(struct platform_device *pdev)
{

	struct msm_hs_port *msm_uport;
	struct device *dev;

	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
		printk(KERN_ERR "Invalid platform device ID = %d\n", pdev->id);
		return -EINVAL;
	}

	msm_uport = &q_uart_port[pdev->id];
	dev = msm_uport->uport.dev;

	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
	debugfs_remove(msm_uport->loopback_dir);

	dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);
	dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
		      msm_uport->rx.rbuffer);
	dma_pool_destroy(msm_uport->rx.pool);

	dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);

	wake_lock_destroy(&msm_uport->rx.wake_lock);
	wake_lock_destroy(&msm_uport->dma_wake_lock);
	destroy_workqueue(msm_uport->hsuart_wq);
	mutex_destroy(&msm_uport->clk_mutex);

	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
	clk_put(msm_uport->clk);
	if (msm_uport->pclk)
		clk_put(msm_uport->pclk);

	/* Free the tx resources */
	kfree(msm_uport->tx.command_ptr);
	kfree(msm_uport->tx.command_ptr_ptr);

	/* Free the rx resources */
	kfree(msm_uport->rx.command_ptr);
	kfree(msm_uport->rx.command_ptr_ptr);

	iounmap(msm_uport->uport.membase);

	return 0;
}

static int msm_hs_init_clk(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Set up the MREG/NREG/DREG/MNDREG */
	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
	if (ret) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		return ret;
	}

	ret = clk_prepare_enable(msm_uport->clk);
	if (ret) {
		printk(KERN_ERR "Error could not turn on UART clk\n");
		return ret;
	}
	if (msm_uport->pclk) {
		ret = clk_prepare_enable(msm_uport->pclk);
		if (ret) {
			clk_disable_unprepare(msm_uport->clk);
			dev_err(uport->dev,
				"Error could not turn on UART pclk\n");
			return ret;
		}
	}

	msm_uport->clk_state = MSM_HS_CLK_ON;
	return 0;
}


/* Connect a UART peripheral's SPS endpoint (consumer endpoint)
 *
 * Also registers an SPS callback function for the consumer
 * process with the SPS driver
 *
 * @uport - Pointer to uart uport structure
 *
 * @return - 0 if successful else negative value.
 *
 */

static int msm_hs_spsconnect_tx(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
	struct sps_connect *sps_config = &tx->cons.config;
	struct sps_register_event *sps_event = &tx->cons.event;

	/* Establish connection between peripheral and memory endpoint */
	ret = sps_connect(sps_pipe_handle, sps_config);
	if (ret) {
		pr_err("msm_serial_hs: sps_connect() failed for tx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		return ret;
	}
	/* Register callback event for EOT (End of transfer) event. */
	ret = sps_register_event(sps_pipe_handle, sps_event);
	if (ret) {
		pr_err("msm_serial_hs: sps_register_event() failed for tx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		goto reg_event_err;
	}
	return 0;

reg_event_err:
	sps_disconnect(sps_pipe_handle);
	return ret;
}

/* Connect a UART peripheral's SPS endpoint (producer endpoint)
 *
 * Also registers an SPS callback function for the producer
 * process with the SPS driver
 *
 * @uport - Pointer to uart uport structure
 *
 * @return - 0 if successful else negative value.
 *
 */

static int msm_hs_spsconnect_rx(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
	struct sps_connect *sps_config = &rx->prod.config;
	struct sps_register_event *sps_event = &rx->prod.event;

	/* Establish connection between peripheral and memory endpoint */
	ret = sps_connect(sps_pipe_handle, sps_config);
	if (ret) {
		pr_err("msm_serial_hs: sps_connect() failed for rx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		return ret;
	}
	/* Register callback event for EOT (End of transfer) event. */
	ret = sps_register_event(sps_pipe_handle, sps_event);
	if (ret) {
		pr_err("msm_serial_hs: sps_register_event() failed for rx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		goto reg_event_err;
	}
	return 0;

reg_event_err:
	sps_disconnect(sps_pipe_handle);
	return ret;
}

/*
 * programs the UARTDM_CSR register with correct bit rates
 *
 * Interrupts should be disabled before we are called, as
 * we modify the baud rate.
 * Set receive stale interrupt level, dependent on bit rate.
 * Goal is to have around 8 ms before indicating stale.
 * roundup(((Bit Rate * .008) / 10) + 1)
 */
static void msm_hs_set_bps_locked(struct uart_port *uport,
				  unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	switch (bps) {
	case 300:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x00);
		rxstale = 1;
		break;
	case 600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x11);
		rxstale = 1;
		break;
	case 1200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x22);
		rxstale = 1;
		break;
	case 2400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x33);
		rxstale = 1;
		break;
	case 4800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x44);
		rxstale = 1;
		break;
	case 9600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x55);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x66);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x77);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x88);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
		rxstale = 16;
		break;
	case 76800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
		rxstale = 31;
		break;
	case 230400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
		rxstale = 31;
		break;
	case 460800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	case 4000000:
	case 3686400:
	case 3200000:
	case 3500000:
	case 3000000:
	case 2500000:
	case 1500000:
	case 1152000:
	case 1000000:
	case 921600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}
	/*
	 * uart baud rate depends on CSR and MND values.
	 * We are updating CSR before and then calling
	 * clk_set_rate which updates MND values. Hence
	 * a dsb is required here.
	 */
	mb();
	if (bps > 460800) {
		uport->uartclk = bps * 16;
	} else {
		uport->uartclk = 7372800;
	}

	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		WARN_ON(1);
	}

	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
	/*
	 * It is suggested to do reset of transmitter and receiver after
	 * changing any protocol configuration. Here Baud rate and stale
	 * timeout are getting updated. Hence reset transmitter and receiver.
	 */
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
}


static void msm_hs_set_std_bps_locked(struct uart_port *uport,
				      unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;

	switch (bps) {
	case 9600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xdd);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}

	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
}


/* Reset BAM RX Endpoint Pipe Index from workqueue context */

static void hsuart_reset_bam_rx_work(struct work_struct *w)
{
	struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
						     reset_bam_rx);
	struct uart_port *uport = &msm_uport->uport;
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	sps_disconnect(sps_pipe_handle);
	msm_hs_spsconnect_rx(uport);

	msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
}

/*
 * termios : new ktermios
 * oldtermios: old ktermios previous setting
 *
 * Configure the serial port
 */
static void msm_hs_set_termios(struct uart_port *uport,
			       struct ktermios *termios,
			       struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	mutex_lock(&msm_uport->clk_mutex);

	/*
	 * Disable Rx channel of UARTDM
	 * DMA Rx Stall happens if enqueue and flush of Rx command happens
	 * concurrently. Hence before changing the baud rate/protocol
	 * configuration and sending flush command to ADM, disable the Rx
	 * channel of UARTDM.
	 * Note: should not reset the receiver here immediately as it is not
	 * suggested to do disable/reset or reset/disable at the same time.
	 */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport)) {
		/* Disable UARTDM RX BAM Interface */
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	} else {
		data &= ~UARTDM_RX_DM_EN_BMSK;
	}

	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* 300 is the minimum baud rate supported by the driver */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);

	/* Temporary remapping 200 BAUD to 3.2 mbps */
	if (bps == 200)
		bps = 3200000;

	uport->uartclk = clk_get_rate(msm_uport->clk);
	if (!uport->uartclk)
		msm_hs_set_std_bps_locked(uport, bps);
	else
		msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UARTDM_MR2_ADDR);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (PARENB == (c_cflag & PARENB)) {
		if (PARODD == (c_cflag & PARODD)) {
			data |= ODD_PARITY;
		} else if (CMSPAR == (c_cflag & CMSPAR)) {
			data |= SPACE_PARITY;
		} else {
			data |= EVEN_PARITY;
		}
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;

	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UARTDM_MR2_ADDR, data);

	/* Configure HW flow control */
	data = msm_hs_read(uport, UARTDM_MR1_ADDR);

	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);

	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
	}

	msm_hs_write(uport, UARTDM_MR1_ADDR, data);

	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;

	uport->read_status_mask = (termios->c_cflag & CREAD);

	msm_hs_write(uport, UARTDM_IMR_ADDR, 0);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);

	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		msm_uport->rx.flush = FLUSH_IGNORE;
		/*
		 * Before using dmov APIs make sure that
		 * previous writel are completed. Hence
		 * a dsb is required here.
		 */
		mb();
		if (is_blsp_uart(msm_uport)) {
			sps_disconnect(sps_pipe_handle);
			msm_hs_spsconnect_rx(uport);
			msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
		} else {
			/* do discard flush */
			msm_dmov_flush(msm_uport->dma_rx_channel, 0);
		}
	}

	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	mb();
	mutex_unlock(&msm_uport->clk_mutex);
}

/*
 * Standard API, Transmitter
 * Any character in the transmit shift register is sent
 */
unsigned int msm_hs_tx_empty(struct uart_port *uport)
{
	unsigned int data;
	unsigned int ret = 0;

	data = msm_hs_read(uport, UARTDM_SR_ADDR);
	if (data & UARTDM_SR_TXEMT_BMSK)
		ret = TIOCSER_TEMT;

	return ret;
}
EXPORT_SYMBOL(msm_hs_tx_empty);

/*
 * Standard API, Stop transmitter.
 * Any character in the transmit shift register is sent as
 * well as the current data mover transfer.
 */
static void msm_hs_stop_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	msm_uport->tx.tx_ready_int_en = 0;
}

/* Disconnect BAM RX Endpoint Pipe Index from workqueue context */
static void hsuart_disconnect_rx_endpoint_work(struct work_struct *w)
{
	struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
						     disconnect_rx_endpoint);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	sps_disconnect(sps_pipe_handle);
	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	msm_uport->rx.flush = FLUSH_SHUTDOWN;
	wake_up(&msm_uport->rx.wait);
}

/*
 * Standard API, Stop receiver as soon as possible.
 *
 * Function immediately terminates the operation of the
 * channel receiver and any incoming characters are lost. None
 * of the receiver status bits are affected by this command and
 * characters that are already in the receive FIFO remain there.
 */
static void msm_hs_stop_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int data;

	/* disable dlink */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport))
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	else
		data &= ~UARTDM_RX_DM_EN_BMSK;
	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* calling DMOV or CLOCK API. Hence mb() */
	mb();
	/* Disable the receiver */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		if (is_blsp_uart(msm_uport)) {
			msm_uport->rx.flush = FLUSH_STOP;
			/* workqueue for BAM rx endpoint disconnect */
			queue_work(msm_uport->hsuart_wq,
				   &msm_uport->disconnect_rx_endpoint);
		} else {
			/* do discard flush */
			msm_dmov_flush(msm_uport->dma_rx_channel, 0);
		}
	}
	if (!is_blsp_uart(msm_uport) && msm_uport->rx.flush != FLUSH_SHUTDOWN)
		msm_uport->rx.flush = FLUSH_STOP;
}

/* Transmit the next chunk of data */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	int aligned_tx_count;
	dma_addr_t src_addr;
	dma_addr_t aligned_src_addr;
	u32 flags = SPS_IOVEC_FLAG_EOT;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
	struct sps_pipe *sps_pipe_handle;

	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		msm_hs_stop_tx_locked(uport);
		return;
	}

	tx->dma_in_flight = 1;

	tx_count = uart_circ_chars_pending(tx_buf);

	if (UARTDM_TX_BUF_SIZE < tx_count)
		tx_count = UARTDM_TX_BUF_SIZE;

	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	/* Mask the src_addr to align on a cache line
	 * and add those bytes to tx_count */
	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
	aligned_tx_count = tx_count + src_addr - aligned_src_addr;

	dma_sync_single_for_device(uport->dev, aligned_src_addr,
				   aligned_tx_count, DMA_TO_DEVICE);

	if (is_blsp_uart(msm_uport)) {
		/* Issue TX BAM Start IFC command */
		msm_hs_write(uport, UARTDM_CR_ADDR, START_TX_BAM_IFC);
	} else {
		tx->command_ptr->num_rows =
				(((tx_count + 15) >> 4) << 16) |
				((tx_count + 15) >> 4);
		tx->command_ptr->src_row_addr = src_addr;

		dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr,
					   sizeof(dmov_box), DMA_TO_DEVICE);

		*tx->command_ptr_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
	}

	/* Save tx_count to use in Callback */
	tx->tx_count = tx_count;
	msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);

	/* Disable the tx_ready interrupt */
	msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	msm_uport->tx.flush = FLUSH_NONE;

	if (is_blsp_uart(msm_uport)) {
		sps_pipe_handle = tx->cons.pipe_handle;
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
				 msm_uport, flags);
	} else {
		dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
					   sizeof(u32), DMA_TO_DEVICE);

		msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
	}
}

/* Start to receive the next chunk of data */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle;
	u32 flags = SPS_IOVEC_FLAG_EOT;
	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
	unsigned int data;

	msm_uport->rx.buffer_pending = 0;
	if (buffer_pending && hs_serial_debug_mask)
		printk(KERN_ERR "Error: rx started in buffer state = %x",
		       buffer_pending);

	if (is_blsp_uart(msm_uport)) {
		/* Issue RX BAM Start IFC command */
		msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
	}
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
	msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
	msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;

	/*
	 * Enable UARTDM Rx Interface as previously it has been
	 * disabled in set_termios before configuring baud rate.
	 */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport)) {
		/* Enable UARTDM Rx BAM Interface */
		data |= UARTDM_RX_BAM_ENABLE_BMSK;
	} else {
		data |= UARTDM_RX_DM_EN_BMSK;
	}

	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	msm_uport->rx.flush = FLUSH_NONE;

	if (is_blsp_uart(msm_uport)) {
		sps_pipe_handle = rx->prod.pipe_handle;
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, rx->rbuffer,
				 UARTDM_RX_BUF_SIZE, msm_uport, flags);
	} else {
		msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel,
				     &msm_uport->rx.xfer);
	}
}

static void flip_insert_work(struct work_struct *work)
{
	unsigned long flags;
	int retval;
	struct msm_hs_port *msm_uport =
		container_of(work, struct msm_hs_port,
			     rx.flip_insert_work.work);
	struct tty_struct *tty = msm_uport->uport.state->port.tty;

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
		if (hs_serial_debug_mask)
			printk(KERN_ERR "Error: No buffer pending in %s",
			       __func__);
		return;
	}
	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		if (retval)
			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
	}
	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
		retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
		if (retval)
			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
	}
	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
		int rx_count, rx_offset;
		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer +
						rx_offset, rx_count);
		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
						 PARITY_ERROR);
		if (retval != rx_count)
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 8 | (rx_count - retval) << 16;
	}
	if (msm_uport->rx.buffer_pending)
		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
				      msecs_to_jiffies(RETRY_TIMEOUT));
	else
		if ((msm_uport->clk_state == MSM_HS_CLK_ON) &&
		    (msm_uport->rx.flush <= FLUSH_IGNORE)) {
			if (hs_serial_debug_mask)
				printk(KERN_WARNING
				       "msm_serial_hs: "
				       "Pending buffers cleared. "
				       "Restarting\n");
			msm_hs_start_rx_locked(&msm_uport->uport);
		}
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
	tty_flip_buffer_push(tty);
}

static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr)
{
	int retval;
	int rx_count;
	static int remaining_rx_count, bytes_pending;
	unsigned long status;
	unsigned long flags;
	unsigned int error_f = 0;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;
	unsigned int flush;
	struct tty_struct *tty;

	msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
				 struct msm_hs_port, rx.tlet);
	uport = &msm_uport->uport;
	tty = uport->state->port.tty;

	status = msm_hs_read(uport, UARTDM_SR_ADDR);

	spin_lock_irqsave(&uport->lock, flags);

	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);

	/* overflow is not connected to data in a FIFO */
	if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
		     (uport->read_status_mask & CREAD))) {
		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		if (!retval)
			msm_uport->rx.buffer_pending |= TTY_OVERRUN;
		uport->icount.buf_overrun++;
		error_f = 1;
	}

	if (!(uport->ignore_status_mask & INPCK))
		status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);

	if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
		/* Can not tell difference between parity & frame error */
		if (hs_serial_debug_mask)
			printk(KERN_WARNING "msm_serial_hs: parity error\n");
		uport->icount.parity++;
		error_f = 1;
		if (!(uport->ignore_status_mask & IGNPAR)) {
			retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_PARITY;
		}
	}

	if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
		if (hs_serial_debug_mask)
			printk(KERN_WARNING "msm_serial_hs: Rx break\n");
		uport->icount.brk++;
		error_f = 1;
		if (!(uport->ignore_status_mask & IGNBRK)) {
			retval = tty_insert_flip_char(tty, 0, TTY_BREAK);
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_BREAK;
		}
	}

	if (error_f)
		msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);

	if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
	flush = msm_uport->rx.flush;
	if (flush == FLUSH_IGNORE)
		if (!msm_uport->rx.buffer_pending)
			msm_hs_start_rx_locked(uport);

	if (flush == FLUSH_STOP) {
		msm_uport->rx.flush = FLUSH_SHUTDOWN;
		wake_up(&msm_uport->rx.wait);
	}
	if (flush >= FLUSH_DATA_INVALID)
		goto out;

	rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);

	if (is_blsp_uart(msm_uport)) {
		if (rx_count > UARTDM_RX_BUF_SIZE) {
			if (bytes_pending) {
				rx_count = remaining_rx_count;
				bytes_pending = 0;
			} else {
				remaining_rx_count = rx_count -
						UARTDM_RX_BUF_SIZE;
				if (remaining_rx_count)
					bytes_pending = 1;
				rx_count = UARTDM_RX_BUF_SIZE;
			}
		}
	}
	/* order the read of rx.buffer */
	rmb();

	if (0 != (uport->read_status_mask & CREAD)) {
		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
						rx_count);
		if (retval != rx_count) {
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 5 | (rx_count - retval) << 16;
		}
	}

	/* order the read of rx.buffer and the start of next rx xfer */
	wmb();

	if (!msm_uport->rx.buffer_pending)
		msm_hs_start_rx_locked(uport);

out:
	if (msm_uport->rx.buffer_pending) {
		if (hs_serial_debug_mask)
			printk(KERN_WARNING
			       "msm_serial_hs: "
			       "tty buffer exhausted. "
			       "Stalling\n");
		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
				      msecs_to_jiffies(RETRY_TIMEOUT));
	}
	/* release wakelock in 500ms, not immediately, because higher layers
	 * don't always take wakelocks when they should */
	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
	spin_unlock_irqrestore(&uport->lock, flags);
	if (flush < FLUSH_DATA_INVALID)
		tty_flip_buffer_push(tty);
}
1360
1361/* Enable the transmitter Interrupt */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001362static void msm_hs_start_tx_locked(struct uart_port *uport )
Mayank Rana55046232011-03-07 10:28:42 +05301363{
1364 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1365
Mayank Rana55046232011-03-07 10:28:42 +05301366 if (msm_uport->tx.tx_ready_int_en == 0) {
1367 msm_uport->tx.tx_ready_int_en = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001368 if (msm_uport->tx.dma_in_flight == 0)
1369 msm_hs_submit_tx_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301370 }
Mayank Rana55046232011-03-07 10:28:42 +05301371}
1372
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301373/**
1374 * Callback notification from SPS driver
1375 *
1376 * This callback function gets triggered called from
1377 * SPS driver when requested SPS data transfer is
1378 * completed.
1379 *
1380 */
1381
1382static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
1383{
1384 struct msm_hs_port *msm_uport =
1385 (struct msm_hs_port *)
1386 ((struct sps_event_notify *)notify)->user;
1387
1388 msm_uport->notify = *notify;
1389 pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
1390 __func__, notify->event_id,
1391 notify->data.transfer.iovec.addr,
1392 notify->data.transfer.iovec.size,
1393 notify->data.transfer.iovec.flags);
1394
1395 tasklet_schedule(&msm_uport->tx.tlet);
1396}
1397
Mayank Rana55046232011-03-07 10:28:42 +05301398/*
1399 * This routine is called when we are done with a DMA transfer
1400 *
1401 * This routine is registered with Data mover when we set
1402 * up a Data Mover transfer. It is called from Data mover ISR
1403 * when the DMA transfer is done.
1404 */
1405static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr,
1406 unsigned int result,
1407 struct msm_dmov_errdata *err)
1408{
Mayank Rana55046232011-03-07 10:28:42 +05301409 struct msm_hs_port *msm_uport;
1410
Mayank Rana55046232011-03-07 10:28:42 +05301411 msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05301412 if (msm_uport->tx.flush == FLUSH_STOP)
1413		/* DMA FLUSH unsuccessful */
1414 WARN_ON(!(result & DMOV_RSLT_FLUSH));
1415 else
1416 /* DMA did not finish properly */
1417 WARN_ON(!(result & DMOV_RSLT_DONE));
Mayank Rana55046232011-03-07 10:28:42 +05301418
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001419 tasklet_schedule(&msm_uport->tx.tlet);
1420}
1421
1422static void msm_serial_hs_tx_tlet(unsigned long tlet_ptr)
1423{
1424 unsigned long flags;
1425 struct msm_hs_port *msm_uport = container_of((struct tasklet_struct *)
1426 tlet_ptr, struct msm_hs_port, tx.tlet);
1427
1428 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05301429 if (msm_uport->tx.flush == FLUSH_STOP) {
1430 msm_uport->tx.flush = FLUSH_SHUTDOWN;
1431 wake_up(&msm_uport->tx.wait);
1432 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
1433 return;
1434 }
Mayank Rana55046232011-03-07 10:28:42 +05301435
1436 msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001437 msm_hs_write(&(msm_uport->uport), UARTDM_IMR_ADDR, msm_uport->imr_reg);
1438	/* Calling clk API. Hence mb() is required. */
1439 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301440
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001441 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
Mayank Rana55046232011-03-07 10:28:42 +05301442}
1443
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301444/**
1445 * Callback notification from SPS driver
1446 *
1447 * This callback function gets called from the
1448 * SPS driver when the requested SPS data transfer is
1449 * completed.
1450 *
1451 */
1452
1453static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
1454{
1455
1456 struct msm_hs_port *msm_uport =
1457 (struct msm_hs_port *)
1458 ((struct sps_event_notify *)notify)->user;
1459
1460 msm_uport->notify = *notify;
1461 pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
1462 __func__, notify->event_id,
1463 notify->data.transfer.iovec.addr,
1464 notify->data.transfer.iovec.size,
1465 notify->data.transfer.iovec.flags);
1466
1467 if (msm_uport->rx.flush == FLUSH_NONE)
1468 tasklet_schedule(&msm_uport->rx.tlet);
1469}
1470
Mayank Rana55046232011-03-07 10:28:42 +05301471/*
1472 * This routine is called when we are done with a DMA transfer or when
1473 * a flush has been sent to the data mover driver.
1474 *
1475 * This routine is registered with Data mover when we set up a Data Mover
1476 * transfer. It is called from Data mover ISR when the DMA transfer is done.
1477 */
1478static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
1479 unsigned int result,
1480 struct msm_dmov_errdata *err)
1481{
Mayank Rana55046232011-03-07 10:28:42 +05301482 struct msm_hs_port *msm_uport;
1483
1484 msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer);
Mayank Rana55046232011-03-07 10:28:42 +05301485
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001486 tasklet_schedule(&msm_uport->rx.tlet);
Mayank Rana55046232011-03-07 10:28:42 +05301487}
1488
1489/*
1490 * Standard API, Current states of modem control inputs
1491 *
1492 * Since CTS can be handled entirely by HARDWARE we always
1493 * indicate clear to send and count on the TX FIFO to block when
1494 * it fills up.
1495 *
1496 * - TIOCM_DCD
1497 * - TIOCM_CTS
1498 * - TIOCM_DSR
1499 * - TIOCM_RI
1500 * DCD and DSR are unsupported and always reported high; RI is reported low.
1501 */
1502static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
1503{
1504 return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
1505}
1506
1507/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001508 * Standard API, Set or clear the RFR signal
1509 *
1510 * To set RFR high (indicating we are not ready for data), we disable auto
1511 * ready-for-receiving and then drive RFR_N high. To set RFR low, we simply
1512 * re-enable auto ready-for-receiving and the hardware lowers the RFR signal
1513 * when it is ready.
Mayank Rana55046232011-03-07 10:28:42 +05301514 */
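/*
 * Usage sketch (hypothetical caller that already holds uport->lock):
 * dropping RTS throttles the remote sender, raising it resumes the flow.
 *
 *	msm_hs_set_mctrl_locked(uport, 0);		RFR_N driven high
 *	msm_hs_set_mctrl_locked(uport, TIOCM_RTS);	auto RFR re-enabled
 */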
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001515void msm_hs_set_mctrl_locked(struct uart_port *uport,
Mayank Rana55046232011-03-07 10:28:42 +05301516 unsigned int mctrl)
1517{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001518 unsigned int set_rts;
1519 unsigned int data;
Mayank Rana55046232011-03-07 10:28:42 +05301520
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001521 /* RTS is active low */
1522 set_rts = TIOCM_RTS & mctrl ? 0 : 1;
Mayank Rana55046232011-03-07 10:28:42 +05301523
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001524 data = msm_hs_read(uport, UARTDM_MR1_ADDR);
1525 if (set_rts) {
1526 /*disable auto ready-for-receiving */
1527 data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
1528 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
1529 /* set RFR_N to high */
1530 msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
1531 } else {
1532 /* Enable auto ready-for-receiving */
1533 data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
1534 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
1535 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001536 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301537}
1538
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001539void msm_hs_set_mctrl(struct uart_port *uport,
1540 unsigned int mctrl)
1541{
1542 unsigned long flags;
1543
1544 spin_lock_irqsave(&uport->lock, flags);
1545 msm_hs_set_mctrl_locked(uport, mctrl);
1546 spin_unlock_irqrestore(&uport->lock, flags);
1547}
1548EXPORT_SYMBOL(msm_hs_set_mctrl);
1549
Mayank Rana55046232011-03-07 10:28:42 +05301550/* Standard API, Enable modem status (CTS) interrupt */
1551static void msm_hs_enable_ms_locked(struct uart_port *uport)
1552{
1553 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1554
Mayank Rana55046232011-03-07 10:28:42 +05301555 /* Enable DELTA_CTS Interrupt */
1556 msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
1557 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001558 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301559
1560}
1561
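/*
 * Called by the serial core when it flushes the transmit circular buffer.
 * If a TX DMA is still in flight, remember that here so the TX-ready ISR
 * does not advance tx_buf->tail again for data the core has already
 * discarded.
 */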
Saket Saurabhce394102012-10-29 19:51:28 +05301562static void msm_hs_flush_buffer(struct uart_port *uport)
1563{
1564 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1565
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301566 if (msm_uport->tx.dma_in_flight)
1567 msm_uport->tty_flush_receive = true;
Saket Saurabhce394102012-10-29 19:51:28 +05301568}
1569
Mayank Rana55046232011-03-07 10:28:42 +05301570/*
1571 * Standard API, Break Signal
1572 *
1573 * Control the transmission of a break signal: ctl == 0 terminates the
1574 * break signal, ctl != 0 starts it.
1575 */
1576static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
1577{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001578 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301579
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001580 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301581 msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001582 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001583 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301584}
1585
1586static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
1587{
1588 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001589 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301590
Mayank Rana55046232011-03-07 10:28:42 +05301591 if (cfg_flags & UART_CONFIG_TYPE) {
1592 uport->type = PORT_MSM;
1593 msm_hs_request_port(uport);
1594 }
Mayank Ranabbfd2692011-09-20 08:51:17 +05301595
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001596 if (is_gsbi_uart(msm_uport)) {
Mayank Rana00b6bff2011-08-17 08:33:42 +05301597 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301598 clk_prepare_enable(msm_uport->pclk);
1599 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001600 iowrite32(GSBI_PROTOCOL_UART, msm_uport->mapped_gsbi +
1601 GSBI_CONTROL_ADDR);
Mayank Ranacb589d82012-03-01 11:50:03 +05301602 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana00b6bff2011-08-17 08:33:42 +05301603 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301604 clk_disable_unprepare(msm_uport->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001605 }
Mayank Rana55046232011-03-07 10:28:42 +05301606}
1607
1608/* Handle CTS changes (Called from interrupt handler) */
Mayank Ranaee815f32011-12-08 09:06:09 +05301609static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301610{
Mayank Rana55046232011-03-07 10:28:42 +05301611 /* clear interrupt */
1612 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001613	/* Calling CLOCK API. Hence mb() is required here. */
1614 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301615 uport->icount.cts++;
1616
Mayank Rana55046232011-03-07 10:28:42 +05301617 /* clear the IOCTL TIOCMIWAIT if called */
1618 wake_up_interruptible(&uport->state->port.delta_msr_wait);
1619}
1620
1621/* Check if the TX path is flushed, and if so clock off.
1622 * Returns:  0 - did not clock off, need to retry (still sending final byte)
1623 *          -1 - did not clock off, do not retry
1624 *           1 - clocked off
1625 */
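/*
 * On a 0 return the caller (hsuart_clock_off_work) re-arms clk_off_timer,
 * a 1 ms hrtimer, so this check runs again from the workqueue until the
 * TX path drains or the clock-off request is abandoned.
 */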
Mayank Ranacb589d82012-03-01 11:50:03 +05301626static int msm_hs_check_clock_off(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301627{
1628 unsigned long sr_status;
Mayank Ranacb589d82012-03-01 11:50:03 +05301629 unsigned long flags;
Mayank Rana88d49142013-01-16 17:28:53 +05301630 int ret;
Mayank Rana55046232011-03-07 10:28:42 +05301631 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1632 struct circ_buf *tx_buf = &uport->state->xmit;
1633
Mayank Ranacb589d82012-03-01 11:50:03 +05301634 mutex_lock(&msm_uport->clk_mutex);
1635 spin_lock_irqsave(&uport->lock, flags);
1636
Mayank Rana55046232011-03-07 10:28:42 +05301637 /* Cancel if tx tty buffer is not empty, dma is in flight,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001638 * or tx fifo is not empty */
Mayank Rana55046232011-03-07 10:28:42 +05301639 if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
1640 !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001641 msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) {
Mayank Ranacb589d82012-03-01 11:50:03 +05301642 spin_unlock_irqrestore(&uport->lock, flags);
1643 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301644 return -1;
1645 }
1646
1647 /* Make sure the uart is finished with the last byte */
1648 sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
Mayank Ranacb589d82012-03-01 11:50:03 +05301649 if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) {
1650 spin_unlock_irqrestore(&uport->lock, flags);
1651 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301652 return 0; /* retry */
Mayank Ranacb589d82012-03-01 11:50:03 +05301653 }
Mayank Rana55046232011-03-07 10:28:42 +05301654
1655 /* Make sure forced RXSTALE flush complete */
1656 switch (msm_uport->clk_req_off_state) {
1657 case CLK_REQ_OFF_START:
1658 msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301659 if (is_blsp_uart(msm_uport)) {
1660 /* Stale interrupt when RX-FIFO is empty
1661 * will fire if STALE_IRQ_EMPTY bit is set
1662 * for UART Core v1.4
1663 */
1664 msm_hs_write(uport, UARTDM_BCR_ADDR,
1665 UARTDM_BCR_STALE_IRQ_EMPTY);
1666 }
Mayank Rana55046232011-03-07 10:28:42 +05301667 msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001668 /*
1669 * Before returning, make sure that the device writel has completed.
1670 * Hence mb() is required here.
1671 */
1672 mb();
Mayank Ranacb589d82012-03-01 11:50:03 +05301673 spin_unlock_irqrestore(&uport->lock, flags);
1674 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301675 return 0; /* RXSTALE flush not complete - retry */
1676 case CLK_REQ_OFF_RXSTALE_ISSUED:
1677 case CLK_REQ_OFF_FLUSH_ISSUED:
Mayank Ranacb589d82012-03-01 11:50:03 +05301678 spin_unlock_irqrestore(&uport->lock, flags);
1679 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301680 return 0; /* RXSTALE flush not complete - retry */
1681 case CLK_REQ_OFF_RXSTALE_FLUSHED:
1682 break; /* continue */
1683 }
1684
1685 if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
1686 if (msm_uport->rx.flush == FLUSH_NONE)
1687 msm_hs_stop_rx_locked(uport);
Mayank Ranacb589d82012-03-01 11:50:03 +05301688
1689 spin_unlock_irqrestore(&uport->lock, flags);
1690 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301691 return 0; /* come back later to really clock off */
1692 }
1693
Mayank Rana55046232011-03-07 10:28:42 +05301694 spin_unlock_irqrestore(&uport->lock, flags);
1695
Mayank Rana55046232011-03-07 10:28:42 +05301696 /* we really want to clock off */
Mayank Ranacb589d82012-03-01 11:50:03 +05301697 clk_disable_unprepare(msm_uport->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001698 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301699 clk_disable_unprepare(msm_uport->pclk);
1700
Mayank Rana55046232011-03-07 10:28:42 +05301701 msm_uport->clk_state = MSM_HS_CLK_OFF;
Mayank Ranacb589d82012-03-01 11:50:03 +05301702
1703 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001704 if (use_low_power_wakeup(msm_uport)) {
1705 msm_uport->wakeup.ignore = 1;
1706 enable_irq(msm_uport->wakeup.irq);
Mayank Rana55046232011-03-07 10:28:42 +05301707 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001708 wake_unlock(&msm_uport->dma_wake_lock);
Mayank Ranacb589d82012-03-01 11:50:03 +05301709
1710 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana88d49142013-01-16 17:28:53 +05301711
1712 /* Reset PNOC Bus Scaling */
1713 if (is_blsp_uart(msm_uport)) {
1714 ret = msm_bus_scale_client_update_request(
1715 msm_uport->bus_perf_client, 0);
1716 if (ret)
1717 pr_err("%s(): Failed to reset bus bw vote\n", __func__);
1718 }
1719
Mayank Ranacb589d82012-03-01 11:50:03 +05301720 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301721 return 1;
1722}
1723
Mayank Ranacb589d82012-03-01 11:50:03 +05301724static void hsuart_clock_off_work(struct work_struct *w)
1725{
1726 struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
1727 clock_off_w);
Mayank Rana55046232011-03-07 10:28:42 +05301728 struct uart_port *uport = &msm_uport->uport;
1729
Mayank Ranacb589d82012-03-01 11:50:03 +05301730 if (!msm_hs_check_clock_off(uport)) {
1731 hrtimer_start(&msm_uport->clk_off_timer,
1732 msm_uport->clk_off_delay,
1733 HRTIMER_MODE_REL);
Mayank Rana55046232011-03-07 10:28:42 +05301734 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301735}
Mayank Rana55046232011-03-07 10:28:42 +05301736
Mayank Ranacb589d82012-03-01 11:50:03 +05301737static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
1738{
1739 struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
1740 clk_off_timer);
Mayank Rana55046232011-03-07 10:28:42 +05301741
Mayank Ranacb589d82012-03-01 11:50:03 +05301742 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
1743 return HRTIMER_NORESTART;
Mayank Rana55046232011-03-07 10:28:42 +05301744}
1745
1746static irqreturn_t msm_hs_isr(int irq, void *dev)
1747{
1748 unsigned long flags;
1749 unsigned long isr_status;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001750 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
Mayank Rana55046232011-03-07 10:28:42 +05301751 struct uart_port *uport = &msm_uport->uport;
1752 struct circ_buf *tx_buf = &uport->state->xmit;
1753 struct msm_hs_tx *tx = &msm_uport->tx;
1754 struct msm_hs_rx *rx = &msm_uport->rx;
1755
1756 spin_lock_irqsave(&uport->lock, flags);
1757
1758 isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);
1759
1760 /* Uart RX starting */
1761 if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001762 wake_lock(&rx->wake_lock); /* hold wakelock while rx dma */
Mayank Rana55046232011-03-07 10:28:42 +05301763 msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
1764 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001765		/* Complete device write for IMR. Hence mb() is required. */
1766 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301767 }
1768 /* Stale rx interrupt */
1769 if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
1770 msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
1771 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001772 /*
1773 * Complete device write before calling the DMOV API. Hence
1774 * mb() is required here.
1775 */
1776 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301777
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301778 if (msm_uport->clk_req_off_state ==
1779 CLK_REQ_OFF_RXSTALE_ISSUED) {
Mayank Rana55046232011-03-07 10:28:42 +05301780 msm_uport->clk_req_off_state =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001781 CLK_REQ_OFF_FLUSH_ISSUED;
1782
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301783 if (is_blsp_uart(msm_uport)) {
1784 /* Reset BCR Register for UARTDM Core v14*/
1785 msm_hs_write(uport, UARTDM_BCR_ADDR, 0x0);
1786 }
1787 }
1788
Mayank Rana55046232011-03-07 10:28:42 +05301789 if (rx->flush == FLUSH_NONE) {
1790 rx->flush = FLUSH_DATA_READY;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301791 if (is_blsp_uart(msm_uport)) {
1792 queue_work(msm_uport->hsuart_wq,
1793 &msm_uport->reset_bam_rx);
1794 } else {
1795 msm_dmov_flush(msm_uport->dma_rx_channel, 1);
1796 }
Mayank Rana55046232011-03-07 10:28:42 +05301797 }
1798 }
1799 /* tx ready interrupt */
1800 if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
1801 /* Clear TX Ready */
1802 msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);
1803
1804 if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
1805 msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
1806 msm_hs_write(uport, UARTDM_IMR_ADDR,
1807 msm_uport->imr_reg);
1808 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001809 /*
1810 * Complete both writes before starting new TX.
1811 * Hence mb() is required here.
1812 */
1813 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301814 /* Complete DMA TX transactions and submit new transactions */
Saket Saurabhce394102012-10-29 19:51:28 +05301815
1816		/* Do not update tx_buf.tail if uart_flush_buffer was already
1817		   called in the serial core */
1818 if (!msm_uport->tty_flush_receive)
1819 tx_buf->tail = (tx_buf->tail +
1820 tx->tx_count) & ~UART_XMIT_SIZE;
1821 else
1822 msm_uport->tty_flush_receive = false;
Mayank Rana55046232011-03-07 10:28:42 +05301823
1824 tx->dma_in_flight = 0;
1825
1826 uport->icount.tx += tx->tx_count;
1827 if (tx->tx_ready_int_en)
1828 msm_hs_submit_tx_locked(uport);
1829
1830 if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
1831 uart_write_wakeup(uport);
1832 }
1833 if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
1834 /* TX FIFO is empty */
1835 msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
1836 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001837 /*
1838 * Complete device write before starting the clock_off request.
1839 * Hence mb() is required here.
1840 */
1841 mb();
Mayank Ranacb589d82012-03-01 11:50:03 +05301842 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
Mayank Rana55046232011-03-07 10:28:42 +05301843 }
1844
1845 /* Change in CTS interrupt */
1846 if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
Mayank Ranaee815f32011-12-08 09:06:09 +05301847 msm_hs_handle_delta_cts_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301848
1849 spin_unlock_irqrestore(&uport->lock, flags);
1850
1851 return IRQ_HANDLED;
1852}
1853
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001854/* request to turn off uart clock once pending TX is flushed */
1855void msm_hs_request_clock_off(struct uart_port *uport) {
1856 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301857 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1858
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001859 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301860 if (msm_uport->clk_state == MSM_HS_CLK_ON) {
1861 msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
1862 msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
Mayank Rana55046232011-03-07 10:28:42 +05301863 msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
1864 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001865 /*
1866 * Complete device write before returning.
1867 * Hence mb() is required here.
1868 */
1869 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301870 }
Mayank Rana55046232011-03-07 10:28:42 +05301871 spin_unlock_irqrestore(&uport->lock, flags);
1872}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001873EXPORT_SYMBOL(msm_hs_request_clock_off);
Mayank Rana55046232011-03-07 10:28:42 +05301874
Mayank Ranacb589d82012-03-01 11:50:03 +05301875void msm_hs_request_clock_on(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301876{
1877 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Ranacb589d82012-03-01 11:50:03 +05301878 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301879 unsigned int data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001880 int ret = 0;
Mayank Rana55046232011-03-07 10:28:42 +05301881
Mayank Ranacb589d82012-03-01 11:50:03 +05301882 mutex_lock(&msm_uport->clk_mutex);
1883 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301884
1885 switch (msm_uport->clk_state) {
1886 case MSM_HS_CLK_OFF:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001887 wake_lock(&msm_uport->dma_wake_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001888 disable_irq_nosync(msm_uport->wakeup.irq);
Mayank Ranacb589d82012-03-01 11:50:03 +05301889 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana88d49142013-01-16 17:28:53 +05301890
1891 /* Vote for PNOC BUS Scaling */
1892 if (is_blsp_uart(msm_uport)) {
1893 ret = msm_bus_scale_client_update_request(
1894 msm_uport->bus_perf_client, 1);
1895 if (ret)
1896 pr_err("%s():Failed to vote for bus scaling.\n",
1897 __func__);
1898 }
1899
Mayank Ranacb589d82012-03-01 11:50:03 +05301900 ret = clk_prepare_enable(msm_uport->clk);
1901 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001902			dev_err(uport->dev, "Clock ON failure"
Mayank Ranacb589d82012-03-01 11:50:03 +05301903			" for UART clk, stalling HSUART\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001904 break;
1905 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301906
1907 if (msm_uport->pclk) {
1908 ret = clk_prepare_enable(msm_uport->pclk);
1909 if (unlikely(ret)) {
1910 clk_disable_unprepare(msm_uport->clk);
1911				dev_err(uport->dev, "Clock ON failure"
1912				" for UART pclk, stalling HSUART\n");
1913 break;
1914 }
1915 }
1916 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001917 /* else fall-through */
Mayank Rana55046232011-03-07 10:28:42 +05301918 case MSM_HS_CLK_REQUEST_OFF:
1919 if (msm_uport->rx.flush == FLUSH_STOP ||
1920 msm_uport->rx.flush == FLUSH_SHUTDOWN) {
1921 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
1922 data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301923 if (is_blsp_uart(msm_uport))
1924 data |= UARTDM_RX_BAM_ENABLE_BMSK;
1925 else
1926 data |= UARTDM_RX_DM_EN_BMSK;
Mayank Rana55046232011-03-07 10:28:42 +05301927 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001928			/* Complete the above device write. Hence mb() is required here. */
1929 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301930 }
1931 hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
1932 if (msm_uport->rx.flush == FLUSH_SHUTDOWN)
1933 msm_hs_start_rx_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301934 if (msm_uport->rx.flush == FLUSH_STOP)
1935 msm_uport->rx.flush = FLUSH_IGNORE;
1936 msm_uport->clk_state = MSM_HS_CLK_ON;
1937 break;
1938 case MSM_HS_CLK_ON:
1939 break;
1940 case MSM_HS_CLK_PORT_OFF:
1941 break;
1942 }
Mayank Rana55046232011-03-07 10:28:42 +05301943
Mayank Rana55046232011-03-07 10:28:42 +05301944 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Ranacb589d82012-03-01 11:50:03 +05301945 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301946}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001947EXPORT_SYMBOL(msm_hs_request_clock_on);
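/*
 * Usage sketch (hypothetical external client, e.g. a Bluetooth driver built
 * on this port): the exported clock_on/clock_off pair brackets bursts of
 * traffic so the UART core clock is only kept on while data is moving.
 *
 *	msm_hs_request_clock_on(uport);
 *	... queue TX / consume RX through the serial core ...
 *	msm_hs_request_clock_off(uport);
 */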
Mayank Rana55046232011-03-07 10:28:42 +05301948
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001949static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
Mayank Rana55046232011-03-07 10:28:42 +05301950{
1951 unsigned int wakeup = 0;
1952 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001953 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
Mayank Rana55046232011-03-07 10:28:42 +05301954 struct uart_port *uport = &msm_uport->uport;
1955 struct tty_struct *tty = NULL;
1956
1957 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001958 if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
1959		/* ignore the first irq - it is a pending irq that occurred
Mayank Rana55046232011-03-07 10:28:42 +05301960 * before enable_irq() */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001961 if (msm_uport->wakeup.ignore)
1962 msm_uport->wakeup.ignore = 0;
Mayank Rana55046232011-03-07 10:28:42 +05301963 else
1964 wakeup = 1;
1965 }
1966
1967 if (wakeup) {
1968 /* the uart was clocked off during an rx, wake up and
1969 * optionally inject char into tty rx */
Mayank Ranacb589d82012-03-01 11:50:03 +05301970 spin_unlock_irqrestore(&uport->lock, flags);
1971 msm_hs_request_clock_on(uport);
1972 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001973 if (msm_uport->wakeup.inject_rx) {
Mayank Rana55046232011-03-07 10:28:42 +05301974 tty = uport->state->port.tty;
1975 tty_insert_flip_char(tty,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001976 msm_uport->wakeup.rx_to_inject,
Mayank Rana55046232011-03-07 10:28:42 +05301977 TTY_NORMAL);
Mayank Rana55046232011-03-07 10:28:42 +05301978 }
1979 }
1980
1981 spin_unlock_irqrestore(&uport->lock, flags);
1982
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001983 if (wakeup && msm_uport->wakeup.inject_rx)
1984 tty_flip_buffer_push(tty);
Mayank Rana55046232011-03-07 10:28:42 +05301985 return IRQ_HANDLED;
1986}
1987
1988static const char *msm_hs_type(struct uart_port *port)
1989{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001990 return ("MSM HS UART");
Mayank Rana55046232011-03-07 10:28:42 +05301991}
1992
1993/* Called when port is opened */
1994static int msm_hs_startup(struct uart_port *uport)
1995{
1996 int ret;
1997 int rfr_level;
1998 unsigned long flags;
1999 unsigned int data;
2000 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana40836782012-11-16 14:45:47 +05302001 struct platform_device *pdev = to_platform_device(uport->dev);
2002 const struct msm_serial_hs_platform_data *pdata =
2003 pdev->dev.platform_data;
Mayank Rana55046232011-03-07 10:28:42 +05302004 struct circ_buf *tx_buf = &uport->state->xmit;
2005 struct msm_hs_tx *tx = &msm_uport->tx;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302006 struct msm_hs_rx *rx = &msm_uport->rx;
2007 struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
2008 struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05302009
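	/*
	 * Auto-RFR watermark: set 16 FIFO entries below the FIFO depth so
	 * flow control kicks in while there is still some receive headroom.
	 */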
2010 rfr_level = uport->fifosize;
2011 if (rfr_level > 16)
2012 rfr_level -= 16;
2013
2014 tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
2015 DMA_TO_DEVICE);
2016
Mayank Rana679436e2012-03-31 05:41:14 +05302017 wake_lock(&msm_uport->dma_wake_lock);
Mayank Rana55046232011-03-07 10:28:42 +05302018 /* turn on uart clk */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002019 ret = msm_hs_init_clk(uport);
Mayank Rana55046232011-03-07 10:28:42 +05302020 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302021		pr_err("Error turning on uartclk\n");
2022 wake_unlock(&msm_uport->dma_wake_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002023 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05302024 }
2025
Mayank Rana40836782012-11-16 14:45:47 +05302026 if (pdata && pdata->gpio_config)
2027 if (unlikely(pdata->gpio_config(1)))
2028 dev_err(uport->dev, "Cannot configure gpios\n");
2029
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302030
2031 /* SPS Connect for BAM endpoints */
2032 if (is_blsp_uart(msm_uport)) {
2033 /* SPS connect for TX */
2034 ret = msm_hs_spsconnect_tx(uport);
2035 if (ret) {
2036 pr_err("msm_serial_hs: SPS connect failed for TX");
2037 goto deinit_uart_clk;
2038 }
2039
2040 /* SPS connect for RX */
2041 ret = msm_hs_spsconnect_rx(uport);
2042 if (ret) {
2043 pr_err("msm_serial_hs: SPS connect failed for RX");
2044 goto sps_disconnect_tx;
2045 }
2046 }
2047
Mayank Rana55046232011-03-07 10:28:42 +05302048 /* Set auto RFR Level */
2049 data = msm_hs_read(uport, UARTDM_MR1_ADDR);
2050 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
2051 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
2052 data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
2053 data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
2054 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
2055
2056 /* Make sure RXSTALE count is non-zero */
2057 data = msm_hs_read(uport, UARTDM_IPR_ADDR);
2058 if (!data) {
2059 data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
2060 msm_hs_write(uport, UARTDM_IPR_ADDR, data);
2061 }
2062
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302063 if (is_blsp_uart(msm_uport)) {
2064 /* Enable BAM mode */
2065 data = UARTDM_TX_BAM_ENABLE_BMSK | UARTDM_RX_BAM_ENABLE_BMSK;
2066 } else {
2067 /* Enable Data Mover Mode */
2068 data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
2069 }
Mayank Rana55046232011-03-07 10:28:42 +05302070 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
2071
2072 /* Reset TX */
2073 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
2074 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
2075 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
2076 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
2077 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
2078 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
2079 msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
2080 /* Turn on Uart Receiver */
2081 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);
2082
2083 /* Turn on Uart Transmitter */
2084 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);
2085
2086 /* Initialize the tx */
2087 tx->tx_ready_int_en = 0;
2088 tx->dma_in_flight = 0;
2089
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302090 if (!is_blsp_uart(msm_uport)) {
2091 tx->xfer.complete_func = msm_hs_dmov_tx_callback;
Mayank Rana55046232011-03-07 10:28:42 +05302092
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302093 tx->command_ptr->cmd = CMD_LC |
2094 CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;
Mayank Rana55046232011-03-07 10:28:42 +05302095
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302096 tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
Mayank Rana55046232011-03-07 10:28:42 +05302097 | (MSM_UARTDM_BURST_SIZE);
2098
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302099 tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16);
Mayank Rana55046232011-03-07 10:28:42 +05302100
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302101 tx->command_ptr->dst_row_addr =
2102 msm_uport->uport.mapbase + UARTDM_TF_ADDR;
2103 }
Mayank Rana55046232011-03-07 10:28:42 +05302104
Mayank Rana55046232011-03-07 10:28:42 +05302105 msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
2106 /* Enable reading the current CTS, no harm even if CTS is ignored */
2107 msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
2108
2109 msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002110 /*
2111 * Complete all device-write-related configuration before
2112 * queuing the RX request. Hence mb() is required here.
2113 */
2114 mb();
Mayank Rana55046232011-03-07 10:28:42 +05302115
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002116 if (use_low_power_wakeup(msm_uport)) {
2117 ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
Mayank Rana679436e2012-03-31 05:41:14 +05302118 if (unlikely(ret)) {
2119 pr_err("%s():Err setting wakeup irq\n", __func__);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302120 goto sps_disconnect_rx;
Mayank Rana679436e2012-03-31 05:41:14 +05302121 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002122 }
Mayank Rana55046232011-03-07 10:28:42 +05302123
2124 ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
2125 "msm_hs_uart", msm_uport);
2126 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302127 pr_err("%s():Error getting uart irq\n", __func__);
2128 goto free_wake_irq;
Mayank Rana55046232011-03-07 10:28:42 +05302129 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002130 if (use_low_power_wakeup(msm_uport)) {
Mayank Ranacb589d82012-03-01 11:50:03 +05302131
2132 ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
2133 msm_hs_wakeup_isr,
2134 IRQF_TRIGGER_FALLING,
2135 "msm_hs_wakeup", msm_uport);
2136
Mayank Rana55046232011-03-07 10:28:42 +05302137 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302138 pr_err("%s():Err getting uart wakeup_irq\n", __func__);
2139 goto free_uart_irq;
Mayank Rana55046232011-03-07 10:28:42 +05302140 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002141 disable_irq(msm_uport->wakeup.irq);
Mayank Rana55046232011-03-07 10:28:42 +05302142 }
2143
Mayank Rana88d49142013-01-16 17:28:53 +05302144 /* Vote for PNOC BUS Scaling */
2145 if (is_blsp_uart(msm_uport)) {
2146 ret = msm_bus_scale_client_update_request(
2147 msm_uport->bus_perf_client, 1);
2148 if (ret)
2149 pr_err("%s(): Failed to vote for bus scaling\n",
2150 __func__);
2151 }
2152
Mayank Rana55046232011-03-07 10:28:42 +05302153 spin_lock_irqsave(&uport->lock, flags);
2154
Mayank Rana55046232011-03-07 10:28:42 +05302155 msm_hs_start_rx_locked(uport);
2156
2157 spin_unlock_irqrestore(&uport->lock, flags);
2158 ret = pm_runtime_set_active(uport->dev);
2159 if (ret)
2160 dev_err(uport->dev, "set active error:%d\n", ret);
2161 pm_runtime_enable(uport->dev);
2162
2163 return 0;
2164
Mayank Rana679436e2012-03-31 05:41:14 +05302165free_uart_irq:
2166 free_irq(uport->irq, msm_uport);
2167free_wake_irq:
2168 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302169sps_disconnect_rx:
2170 if (is_blsp_uart(msm_uport))
2171 sps_disconnect(sps_pipe_handle_rx);
2172sps_disconnect_tx:
2173 if (is_blsp_uart(msm_uport))
2174 sps_disconnect(sps_pipe_handle_tx);
Mayank Rana679436e2012-03-31 05:41:14 +05302175deinit_uart_clk:
Mayank Ranacb589d82012-03-01 11:50:03 +05302176 clk_disable_unprepare(msm_uport->clk);
Mayank Rana679436e2012-03-31 05:41:14 +05302177 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05302178 clk_disable_unprepare(msm_uport->pclk);
Mayank Rana679436e2012-03-31 05:41:14 +05302179 wake_unlock(&msm_uport->dma_wake_lock);
2180
Mayank Rana55046232011-03-07 10:28:42 +05302181 return ret;
2182}
2183
2184/* Initialize tx and rx data structures */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002185static int uartdm_init_port(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05302186{
2187 int ret = 0;
2188 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2189 struct msm_hs_tx *tx = &msm_uport->tx;
2190 struct msm_hs_rx *rx = &msm_uport->rx;
2191
Mayank Rana55046232011-03-07 10:28:42 +05302192 init_waitqueue_head(&rx->wait);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302193 init_waitqueue_head(&tx->wait);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002194 wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx");
2195 wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND,
2196 "msm_serial_hs_dma");
2197
2198 tasklet_init(&rx->tlet, msm_serial_hs_rx_tlet,
2199 (unsigned long) &rx->tlet);
2200 tasklet_init(&tx->tlet, msm_serial_hs_tx_tlet,
2201 (unsigned long) &tx->tlet);
Mayank Rana55046232011-03-07 10:28:42 +05302202
2203 rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
2204 UARTDM_RX_BUF_SIZE, 16, 0);
2205 if (!rx->pool) {
2206 pr_err("%s(): cannot allocate rx_buffer_pool", __func__);
2207 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002208		goto exit_tasklet_init;
Mayank Rana55046232011-03-07 10:28:42 +05302209 }
2210
2211 rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
2212 if (!rx->buffer) {
2213 pr_err("%s(): cannot allocate rx->buffer", __func__);
2214 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002215 goto free_pool;
Mayank Rana55046232011-03-07 10:28:42 +05302216 }
2217
Mayank Ranaff398d02012-12-18 10:22:50 +05302218 /* Set up Uart Receive */
2219 msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
2220
2221 INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
2222
2223 if (is_blsp_uart(msm_uport))
2224 return ret;
2225
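	/*
	 * Everything below sets up the legacy ADM data-mover descriptors;
	 * BLSP (BAM) ports returned above and use the SPS pipes instead.
	 */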
2226 /* Allocate the command pointer. Needs to be 64 bit aligned */
2227 tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
2228 if (!tx->command_ptr) {
2229		ret = -ENOMEM;
2230		goto free_rx_buffer;
2231 }
2232
2233 tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
2234 if (!tx->command_ptr_ptr) {
2235 ret = -ENOMEM;
2236 goto free_tx_command_ptr;
2237 }
2238
2239 tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
2240 sizeof(dmov_box), DMA_TO_DEVICE);
2241 tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
2242 tx->command_ptr_ptr,
2243 sizeof(u32), DMA_TO_DEVICE);
2244 tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
2245
Mayank Rana55046232011-03-07 10:28:42 +05302246 /* Allocate the command pointer. Needs to be 64 bit aligned */
2247 rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
2248 if (!rx->command_ptr) {
2249 pr_err("%s(): cannot allocate rx->command_ptr", __func__);
2250 ret = -ENOMEM;
Mayank Ranaff398d02012-12-18 10:22:50 +05302251 goto free_tx_command_ptr_ptr;
Mayank Rana55046232011-03-07 10:28:42 +05302252 }
2253
Mayank Rana8431de82011-12-08 09:06:08 +05302254 rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
Mayank Rana55046232011-03-07 10:28:42 +05302255 if (!rx->command_ptr_ptr) {
2256 pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
2257 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002258 goto free_rx_command_ptr;
Mayank Rana55046232011-03-07 10:28:42 +05302259 }
2260
2261 rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
2262 (UARTDM_RX_BUF_SIZE >> 4);
2263
2264 rx->command_ptr->dst_row_addr = rx->rbuffer;
2265
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002266 rx->xfer.complete_func = msm_hs_dmov_rx_callback;
2267
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002268 rx->command_ptr->cmd = CMD_LC |
2269 CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;
2270
2271 rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
2272 | (MSM_UARTDM_BURST_SIZE);
2273 rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE;
2274 rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;
2275
Mayank Rana55046232011-03-07 10:28:42 +05302276 rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
2277 sizeof(dmov_box), DMA_TO_DEVICE);
2278
2279 *rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
2280
2281 rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
Mayank Rana8431de82011-12-08 09:06:08 +05302282 sizeof(u32), DMA_TO_DEVICE);
Mayank Rana55046232011-03-07 10:28:42 +05302283 rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
2284
Mayank Rana55046232011-03-07 10:28:42 +05302285 return ret;
2286
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002287free_rx_command_ptr:
Mayank Rana55046232011-03-07 10:28:42 +05302288 kfree(rx->command_ptr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002289
Mayank Ranaff398d02012-12-18 10:22:50 +05302290free_tx_command_ptr_ptr:
2291 kfree(msm_uport->tx.command_ptr_ptr);
2292 dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
2293 sizeof(u32), DMA_TO_DEVICE);
2294 dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
2295 sizeof(dmov_box), DMA_TO_DEVICE);
2296
2297free_tx_command_ptr:
2298 kfree(msm_uport->tx.command_ptr);
2299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002300free_rx_buffer:
Mayank Rana55046232011-03-07 10:28:42 +05302301 dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002302 msm_uport->rx.rbuffer);
2303
2304free_pool:
Mayank Rana55046232011-03-07 10:28:42 +05302305 dma_pool_destroy(msm_uport->rx.pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002306
2307exit_tasklet_init:
2308 wake_lock_destroy(&msm_uport->rx.wake_lock);
2309 wake_lock_destroy(&msm_uport->dma_wake_lock);
2310 tasklet_kill(&msm_uport->tx.tlet);
2311 tasklet_kill(&msm_uport->rx.tlet);
Mayank Rana55046232011-03-07 10:28:42 +05302312 return ret;
2313}
2314
Mayank Ranaff398d02012-12-18 10:22:50 +05302315struct msm_serial_hs_platform_data
2316 *msm_hs_dt_to_pdata(struct platform_device *pdev)
2317{
2318 struct device_node *node = pdev->dev.of_node;
2319 struct msm_serial_hs_platform_data *pdata;
2320 int rx_to_inject, ret;
2321
2322 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2323 if (!pdata) {
2324 pr_err("unable to allocate memory for platform data\n");
2325 return ERR_PTR(-ENOMEM);
2326 }
2327
2328 /* UART TX GPIO */
2329 pdata->uart_tx_gpio = of_get_named_gpio(node,
2330 "qcom,tx-gpio", 0);
2331 if (pdata->uart_tx_gpio < 0)
2332 pr_debug("uart_tx_gpio is not available\n");
2333
2334 /* UART RX GPIO */
2335 pdata->uart_rx_gpio = of_get_named_gpio(node,
2336 "qcom,rx-gpio", 0);
2337 if (pdata->uart_rx_gpio < 0)
2338 pr_debug("uart_rx_gpio is not available\n");
2339
2340 /* UART CTS GPIO */
2341 pdata->uart_cts_gpio = of_get_named_gpio(node,
2342 "qcom,cts-gpio", 0);
2343 if (pdata->uart_cts_gpio < 0)
2344 pr_debug("uart_cts_gpio is not available\n");
2345
2346 /* UART RFR GPIO */
2347 pdata->uart_rfr_gpio = of_get_named_gpio(node,
2348 "qcom,rfr-gpio", 0);
2349 if (pdata->uart_rfr_gpio < 0)
2350 pr_debug("uart_rfr_gpio is not available\n");
2351
2352 pdata->inject_rx_on_wakeup = of_property_read_bool(node,
2353 "qcom,inject-rx-on-wakeup");
2354
2355 if (pdata->inject_rx_on_wakeup) {
2356 ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
2357 &rx_to_inject);
2358 if (ret < 0) {
2359 pr_err("Error: Rx_char_to_inject not specified.\n");
2360 return ERR_PTR(ret);
2361 }
2362 pdata->rx_to_inject = (char)rx_to_inject;
2363 }
2364
2365 ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
2366 &pdata->bam_tx_ep_pipe_index);
2367 if (ret < 0) {
2368 pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
2369 return ERR_PTR(ret);
2370 }
2371
2372 if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
2373 pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
2374 pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
2375 return ERR_PTR(-EINVAL);
2376 }
2377
2378 ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
2379 &pdata->bam_rx_ep_pipe_index);
2380 if (ret < 0) {
2381 pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
2382 return ERR_PTR(ret);
2383 }
2384
2385 if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
2386 pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
2387 pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
2388 return ERR_PTR(-EINVAL);
2389 }
2390
2391 pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
2392		"tx_gpio:%d rx_gpio:%d cts_gpio:%d rfr_gpio:%d",
2393 pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
2394 pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
2395 pdata->uart_rfr_gpio);
2396
2397 return pdata;
2398}
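/*
 * Illustrative device tree node for the properties parsed above; the node
 * name, gpio controller phandle and all values are made up for the example
 * and do not come from any particular target:
 *
 *	uart7: uart@0 {
 *		cell-index = <7>;
 *		qcom,tx-gpio = <&msmgpio 41 0x00>;
 *		qcom,rx-gpio = <&msmgpio 42 0x00>;
 *		qcom,cts-gpio = <&msmgpio 43 0x00>;
 *		qcom,rfr-gpio = <&msmgpio 44 0x00>;
 *		qcom,inject-rx-on-wakeup;
 *		qcom,rx-char-to-inject = <0xFD>;
 *		qcom,bam-tx-ep-pipe-index = <0>;
 *		qcom,bam-rx-ep-pipe-index = <1>;
 *	};
 */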
2399
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302400
2401/**
2402 * Deallocate UART peripheral's SPS endpoint
2403 * @msm_uport - Pointer to msm_hs_port structure
2404 * @ep - Pointer to sps endpoint data structure
2405 */
2406
2407static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
2408 struct msm_hs_sps_ep_conn_data *ep)
2409{
2410 struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
2411 struct sps_connect *sps_config = &ep->config;
2412
2413 dma_free_coherent(msm_uport->uport.dev,
2414 sps_config->desc.size,
2415 &sps_config->desc.phys_base,
2416 GFP_KERNEL);
2417 sps_free_endpoint(sps_pipe_handle);
2418}
2419
2420
2421/**
2422 * Allocate UART peripheral's SPS endpoint
2423 *
2424 * This function allocates endpoint context
2425 * by calling appropriate SPS driver APIs.
2426 *
2427 * @msm_uport - Pointer to msm_hs_port structure
2428 * @ep - Pointer to sps endpoint data structure
2429 * @is_producer - 1 means Producer endpoint
2430 * - 0 means Consumer endpoint
2431 *
2432 * @return - 0 if successful else negative value
2433 */
2434
2435static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
2436 struct msm_hs_sps_ep_conn_data *ep,
2437 bool is_producer)
2438{
2439 int rc = 0;
2440 struct sps_pipe *sps_pipe_handle;
2441 struct sps_connect *sps_config = &ep->config;
2442 struct sps_register_event *sps_event = &ep->event;
2443
2444 /* Allocate endpoint context */
2445 sps_pipe_handle = sps_alloc_endpoint();
2446 if (!sps_pipe_handle) {
2447 pr_err("msm_serial_hs: sps_alloc_endpoint() failed!!\n"
2448 "is_producer=%d", is_producer);
2449 rc = -ENOMEM;
2450 goto out;
2451 }
2452
2453 /* Get default connection configuration for an endpoint */
2454 rc = sps_get_config(sps_pipe_handle, sps_config);
2455 if (rc) {
2456 pr_err("msm_serial_hs: sps_get_config() failed!!\n"
2457 "pipe_handle=0x%x rc=%d", (u32)sps_pipe_handle, rc);
2458 goto get_config_err;
2459 }
2460
2461 /* Modify the default connection configuration */
2462 if (is_producer) {
2463 /* For UART producer transfer, source is UART peripheral
2464		   whereas destination is system memory */
2465 sps_config->source = msm_uport->bam_handle;
2466 sps_config->destination = SPS_DEV_HANDLE_MEM;
2467 sps_config->mode = SPS_MODE_SRC;
2468 sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
2469 sps_config->dest_pipe_index = 0;
2470 sps_config->options = SPS_O_EOT;
2471 } else {
2472 /* For UART consumer transfer, source is system memory
2473		   whereas destination is UART peripheral */
2474 sps_config->source = SPS_DEV_HANDLE_MEM;
2475 sps_config->destination = msm_uport->bam_handle;
2476 sps_config->mode = SPS_MODE_DEST;
2477 sps_config->src_pipe_index = 0;
2478 sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
2479 sps_config->options = SPS_O_EOT;
2480 }
2481
2482 sps_config->event_thresh = 0x10;
2483
2484 /* Allocate maximum descriptor fifo size */
2485 sps_config->desc.size = 65532;
2486 sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
2487 sps_config->desc.size,
2488 &sps_config->desc.phys_base,
2489 GFP_KERNEL);
2490 if (!sps_config->desc.base) {
2491 rc = -ENOMEM;
2492 pr_err("msm_serial_hs: dma_alloc_coherent() failed!!\n");
2493 goto get_config_err;
2494 }
2495 memset(sps_config->desc.base, 0x00, sps_config->desc.size);
2496
2497 sps_event->mode = SPS_TRIGGER_CALLBACK;
2498 sps_event->options = SPS_O_EOT;
2499 if (is_producer)
2500 sps_event->callback = msm_hs_sps_rx_callback;
2501 else
2502 sps_event->callback = msm_hs_sps_tx_callback;
2503
2504 sps_event->user = (void *)msm_uport;
2505
2506 /* Now save the sps pipe handle */
2507 ep->pipe_handle = sps_pipe_handle;
2508 pr_debug("msm_serial_hs: success !! %s: pipe_handle=0x%x\n"
2509 "desc_fifo.phys_base=0x%x\n",
2510 is_producer ? "READ" : "WRITE",
2511 (u32)sps_pipe_handle, sps_config->desc.phys_base);
2512 return 0;
2513
2514get_config_err:
2515 sps_free_endpoint(sps_pipe_handle);
2516out:
2517 return rc;
2518}
2519
2520/**
2521 * Initialize SPS HW connected with UART core
2522 *
2523 * This function registers BAM HW resources with the
2524 * SPS driver and then initializes 2 SPS endpoints
2525 *
2526 * msm_uport - Pointer to msm_hs_port structure
2527 *
2528 * @return - 0 if successful else negative value
2529 */
2530
2531static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
2532{
2533 int rc = 0;
2534 struct sps_bam_props bam = {0};
2535 u32 bam_handle;
2536
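
	/*
	 * If this BAM is already registered with the SPS driver, sps_phy2h()
	 * returns its existing handle and the registration below is skipped;
	 * otherwise register the BAM device here.
	 */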
2537 rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
2538 if (rc || !bam_handle) {
2539 bam.phys_addr = msm_uport->bam_mem;
2540 bam.virt_addr = msm_uport->bam_base;
2541 /*
2542		 * This event threshold value is only significant for BAM-to-BAM
2543 * transfer. It's ignored for BAM-to-System mode transfer.
2544 */
2545 bam.event_threshold = 0x10; /* Pipe event threshold */
2546 bam.summing_threshold = 1; /* BAM event threshold */
2547
2548		/* SPS driver will handle the UART BAM IRQ */
2549 bam.irq = (u32)msm_uport->bam_irq;
2550 bam.manage = SPS_BAM_MGR_LOCAL;
2551
2552 pr_debug("msm_serial_hs: bam physical base=0x%x\n",
2553 (u32)bam.phys_addr);
2554 pr_debug("msm_serial_hs: bam virtual base=0x%x\n",
2555 (u32)bam.virt_addr);
2556
2557 /* Register UART Peripheral BAM device to SPS driver */
2558 rc = sps_register_bam_device(&bam, &bam_handle);
2559 if (rc) {
2560 pr_err("msm_serial_hs: BAM device register failed\n");
2561 return rc;
2562 }
2563 pr_info("msm_serial_hs: BAM device registered. bam_handle=0x%x",
2564			bam_handle);
2565 }
2566 msm_uport->bam_handle = bam_handle;
2567
2568 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
2569 UART_SPS_PROD_PERIPHERAL);
2570 if (rc) {
2571 pr_err("%s: Failed to Init Producer BAM-pipe", __func__);
2572 goto deregister_bam;
2573 }
2574
2575 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
2576 UART_SPS_CONS_PERIPHERAL);
2577 if (rc) {
2578 pr_err("%s: Failed to Init Consumer BAM-pipe", __func__);
2579 goto deinit_ep_conn_prod;
2580 }
2581 return 0;
2582
2583deinit_ep_conn_prod:
2584 msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
2585deregister_bam:
2586 sps_deregister_bam_device(msm_uport->bam_handle);
2587 return rc;
2588}
2589
Mayank Rana55046232011-03-07 10:28:42 +05302590static int __devinit msm_hs_probe(struct platform_device *pdev)
2591{
Mayank Rana88d49142013-01-16 17:28:53 +05302592 int ret = 0;
Mayank Rana55046232011-03-07 10:28:42 +05302593 struct uart_port *uport;
2594 struct msm_hs_port *msm_uport;
Mayank Ranaff398d02012-12-18 10:22:50 +05302595 struct resource *core_resource;
2596 struct resource *bam_resource;
Mayank Rana55046232011-03-07 10:28:42 +05302597 struct resource *resource;
Mayank Ranaff398d02012-12-18 10:22:50 +05302598 int core_irqres, bam_irqres;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002599 struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
Mayank Ranaff398d02012-12-18 10:22:50 +05302600 struct device_node *node = pdev->dev.of_node;
2601
2602 if (pdev->dev.of_node) {
2603 dev_dbg(&pdev->dev, "device tree enabled\n");
2604 pdata = msm_hs_dt_to_pdata(pdev);
2605 if (IS_ERR(pdata))
2606 return PTR_ERR(pdata);
2607
2608 of_property_read_u32(node, "cell-index",
2609 &pdev->id);
2610
2611 pdev->dev.platform_data = pdata;
2612 }
Mayank Rana55046232011-03-07 10:28:42 +05302613
2614 if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
Mayank Ranaff398d02012-12-18 10:22:50 +05302615		pr_err("Invalid platform device ID = %d\n", pdev->id);
Mayank Rana55046232011-03-07 10:28:42 +05302616 return -EINVAL;
2617 }
2618
2619 msm_uport = &q_uart_port[pdev->id];
2620 uport = &msm_uport->uport;
Mayank Rana55046232011-03-07 10:28:42 +05302621 uport->dev = &pdev->dev;
2622
Mayank Ranaff398d02012-12-18 10:22:50 +05302623 if (pdev->dev.of_node)
2624 msm_uport->uart_type = BLSP_HSUART;
Mayank Rana55046232011-03-07 10:28:42 +05302625
Mayank Ranaff398d02012-12-18 10:22:50 +05302626 /* Get required resources for BAM HSUART */
2627 if (is_blsp_uart(msm_uport)) {
2628 core_resource = platform_get_resource_byname(pdev,
2629 IORESOURCE_MEM, "core_mem");
2630 bam_resource = platform_get_resource_byname(pdev,
2631 IORESOURCE_MEM, "bam_mem");
2632 core_irqres = platform_get_irq_byname(pdev, "core_irq");
2633 bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002634
Mayank Ranaff398d02012-12-18 10:22:50 +05302635 if (!core_resource) {
2636 pr_err("Invalid core HSUART Resources.\n");
2637 return -ENXIO;
2638 }
2639
2640 if (!bam_resource) {
2641 pr_err("Invalid BAM HSUART Resources.\n");
2642 return -ENXIO;
2643 }
2644
2645 if (!core_irqres) {
2646 pr_err("Invalid core irqres Resources.\n");
2647 return -ENXIO;
2648 }
2649 if (!bam_irqres) {
2650 pr_err("Invalid bam irqres Resources.\n");
2651 return -ENXIO;
2652 }
2653
2654 uport->mapbase = core_resource->start;
2655
2656 uport->membase = ioremap(uport->mapbase,
2657 resource_size(core_resource));
2658 if (unlikely(!uport->membase)) {
2659 pr_err("UART Resource ioremap Failed.\n");
2660 return -ENOMEM;
2661 }
2662 msm_uport->bam_mem = bam_resource->start;
2663 msm_uport->bam_base = ioremap(msm_uport->bam_mem,
2664 resource_size(bam_resource));
2665 if (unlikely(!msm_uport->bam_base)) {
2666 pr_err("UART BAM Resource ioremap Failed.\n");
2667 iounmap(uport->membase);
2668 return -ENOMEM;
2669 }
2670
2671 uport->irq = core_irqres;
2672 msm_uport->bam_irq = bam_irqres;
2673
Mayank Rana88d49142013-01-16 17:28:53 +05302674 msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2675 if (!msm_uport->bus_scale_table) {
2676 pr_err("BLSP UART: Bus scaling is disabled\n");
2677			ret = -ENODEV;
			goto unmap_memory;
2678 } else {
2679 msm_uport->bus_perf_client =
2680 msm_bus_scale_register_client
2681 (msm_uport->bus_scale_table);
2682			if (!msm_uport->bus_perf_client) {
2683 pr_err("%s(): Bus client register failed.\n",
2684 __func__);
2685				ret = -EINVAL;
				goto unmap_memory;
2686 }
2687 }
Mayank Ranaff398d02012-12-18 10:22:50 +05302688 } else {
2689
2690 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2691 if (unlikely(!resource))
2692 return -ENXIO;
2693 uport->mapbase = resource->start;
2694 uport->membase = ioremap(uport->mapbase,
2695 resource_size(resource));
2696 if (unlikely(!uport->membase))
2697 return -ENOMEM;
2698
2699 uport->irq = platform_get_irq(pdev, 0);
2700 if (unlikely((int)uport->irq < 0)) {
2701 pr_err("UART IRQ Failed.\n");
2702 iounmap(uport->membase);
2703 return -ENXIO;
2704 }
2705 }
Mayank Rana55046232011-03-07 10:28:42 +05302706
Mayank Rana55046232011-03-07 10:28:42 +05302707 if (pdata == NULL)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002708 msm_uport->wakeup.irq = -1;
2709 else {
2710 msm_uport->wakeup.irq = pdata->wakeup_irq;
2711 msm_uport->wakeup.ignore = 1;
2712 msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
2713 msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
2714
Mayank Ranaff398d02012-12-18 10:22:50 +05302715 if (unlikely(msm_uport->wakeup.irq < 0)) {
2716 ret = -ENXIO;
2717 goto unmap_memory;
2718 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002719
Mayank Ranaff398d02012-12-18 10:22:50 +05302720 if (is_blsp_uart(msm_uport)) {
2721 msm_uport->bam_tx_ep_pipe_index =
2722 pdata->bam_tx_ep_pipe_index;
2723 msm_uport->bam_rx_ep_pipe_index =
2724 pdata->bam_rx_ep_pipe_index;
2725 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002726 }
Mayank Rana55046232011-03-07 10:28:42 +05302727
Mayank Ranaff398d02012-12-18 10:22:50 +05302728 if (!is_blsp_uart(msm_uport)) {
Mayank Rana55046232011-03-07 10:28:42 +05302729
Mayank Ranaff398d02012-12-18 10:22:50 +05302730 resource = platform_get_resource_byname(pdev,
2731 IORESOURCE_DMA, "uartdm_channels");
2732 if (unlikely(!resource)) {
2733 ret = -ENXIO;
2734 goto unmap_memory;
2735 }
2736
2737 msm_uport->dma_tx_channel = resource->start;
2738 msm_uport->dma_rx_channel = resource->end;
2739
2740 resource = platform_get_resource_byname(pdev,
2741 IORESOURCE_DMA, "uartdm_crci");
2742 if (unlikely(!resource)) {
2743 ret = -ENXIO;
2744 goto unmap_memory;
2745 }
2746
2747 msm_uport->dma_tx_crci = resource->start;
2748 msm_uport->dma_rx_crci = resource->end;
2749 }
Mayank Rana55046232011-03-07 10:28:42 +05302750
2751 uport->iotype = UPIO_MEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002752 uport->fifosize = 64;
Mayank Rana55046232011-03-07 10:28:42 +05302753 uport->ops = &msm_hs_ops;
2754 uport->flags = UPF_BOOT_AUTOCONF;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002755 uport->uartclk = 7372800;
Mayank Rana55046232011-03-07 10:28:42 +05302756 msm_uport->imr_reg = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002757
Matt Wagantalle2522372011-08-17 14:52:21 -07002758 msm_uport->clk = clk_get(&pdev->dev, "core_clk");
Mayank Ranaff398d02012-12-18 10:22:50 +05302759 if (IS_ERR(msm_uport->clk)) {
2760 ret = PTR_ERR(msm_uport->clk);
2761 goto unmap_memory;
2762 }
Mayank Rana55046232011-03-07 10:28:42 +05302763
Matt Wagantalle2522372011-08-17 14:52:21 -07002764 msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002765 /*
2766 * Some configurations do not require explicit pclk control so
2767 * do not flag error on pclk get failure.
2768 */
2769 if (IS_ERR(msm_uport->pclk))
2770 msm_uport->pclk = NULL;
2771
2772 ret = clk_set_rate(msm_uport->clk, uport->uartclk);
2773 if (ret) {
2774		dev_err(&pdev->dev, "Error setting clock rate on UART\n");
Mayank Ranaff398d02012-12-18 10:22:50 +05302775 goto unmap_memory;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002776 }
2777
Mayank Ranacb589d82012-03-01 11:50:03 +05302778 msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
2779 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
2780 if (!msm_uport->hsuart_wq) {
2781 pr_err("%s(): Unable to create workqueue hsuart_wq\n",
2782 __func__);
Mayank Ranaff398d02012-12-18 10:22:50 +05302783 ret = -ENOMEM;
2784 goto unmap_memory;
Mayank Ranacb589d82012-03-01 11:50:03 +05302785 }
2786
2787 INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302788
2789 /* Init work for Reset Rx bam endpoints */
2790 INIT_WORK(&msm_uport->reset_bam_rx, hsuart_reset_bam_rx_work);
2791
2792 /* Init work for sps_disconnect in stop_rx_locked */
2793 INIT_WORK(&msm_uport->disconnect_rx_endpoint,
2794 hsuart_disconnect_rx_endpoint_work);
Mayank Ranacb589d82012-03-01 11:50:03 +05302795 mutex_init(&msm_uport->clk_mutex);
2796
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302797 /* Initialize SPS HW connected with UART core */
2798 if (is_blsp_uart(msm_uport)) {
2799 ret = msm_hs_sps_init(msm_uport);
2800 if (unlikely(ret)) {
2801			pr_err("SPS initialization failed, err=%d\n", ret);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05302802 goto workqueue_destroy;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302803 }
2804 }
2805
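	/*
	 * The core and iface clocks must be running for the register
	 * writes below (uartdm_init_port() and the CR protection setup)
	 * to reach the UART hardware.
	 */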
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07002806 clk_prepare_enable(msm_uport->clk);
2807 if (msm_uport->pclk)
2808 clk_prepare_enable(msm_uport->pclk);
2809
Mayank Rana55046232011-03-07 10:28:42 +05302810 ret = uartdm_init_port(uport);
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07002811 if (unlikely(ret)) {
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05302812 goto err_clock;
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07002813 }
Mayank Rana55046232011-03-07 10:28:42 +05302814
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002815 /* configure the CR Protection to Enable */
2816 msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07002817
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07002818
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002819 /*
2820	 * Enable command register protection before going ahead: this hw
2821	 * configuration ensures that a command issued to the CR register
2822	 * completes before the next command starts. Hence the mb() here.
2823 */
2824 mb();
Mayank Rana55046232011-03-07 10:28:42 +05302825
2826 msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
2827 hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
2828 HRTIMER_MODE_REL);
2829 msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
2830 msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */
2831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002832 ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
2833 if (unlikely(ret))
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05302834 goto err_clock;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002835
2836 msm_serial_debugfs_init(msm_uport, pdev->id);
2837
Mayank Rana55046232011-03-07 10:28:42 +05302838 uport->line = pdev->id;
Saket Saurabh51690e52012-08-17 14:17:46 +05302839 if (pdata != NULL && pdata->userid && pdata->userid <= UARTDM_NR)
2840 uport->line = pdata->userid;
Mayank Ranaff398d02012-12-18 10:22:50 +05302841 ret = uart_add_one_port(&msm_hs_driver, uport);
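	/*
	 * The clock references taken for port initialization are dropped
	 * again below, on both the success and error paths; they are
	 * enabled again when the port is actually opened.
	 */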
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05302842 if (!ret) {
2843 clk_disable_unprepare(msm_uport->clk);
2844 if (msm_uport->pclk)
2845 clk_disable_unprepare(msm_uport->pclk);
Mayank Ranaff398d02012-12-18 10:22:50 +05302846 return ret;
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05302847 }
Mayank Ranaff398d02012-12-18 10:22:50 +05302848
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05302849err_clock:
2850 clk_disable_unprepare(msm_uport->clk);
2851 if (msm_uport->pclk)
2852 clk_disable_unprepare(msm_uport->pclk);
2853workqueue_destroy:
2854 destroy_workqueue(msm_uport->hsuart_wq);
Mayank Ranaff398d02012-12-18 10:22:50 +05302855unmap_memory:
2856 iounmap(uport->membase);
2857 if (is_blsp_uart(msm_uport))
2858 iounmap(msm_uport->bam_base);
2859
2860 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05302861}
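
/*
 * Illustrative sketch only (not part of the driver): a board file might
 * supply platform data for the probe path above roughly as follows. The
 * field names match those consumed in msm_hs_probe()/msm_hs_shutdown()
 * (wakeup_irq, inject_rx_on_wakeup, rx_to_inject, gpio_config, userid and
 * the BAM pipe indexes); every value and name below is hypothetical. Note
 * that msm_hs_probe() rejects pdata carrying a negative wakeup_irq.
 *
 *	static int example_uart_gpio_config(int enable)
 *	{
 *		return 0;	(board-specific pin setup would go here)
 *	}
 *
 *	static struct msm_serial_hs_platform_data example_uart_pdata = {
 *		.wakeup_irq		= EXAMPLE_RX_WAKEUP_IRQ,
 *		.inject_rx_on_wakeup	= 1,
 *		.rx_to_inject		= 0x32,	(first byte expected after wakeup)
 *		.gpio_config		= example_uart_gpio_config,
 *		.userid			= 0,	(keep pdev->id as the line number)
 *		.bam_tx_ep_pipe_index	= 0,	(BLSP configurations only)
 *		.bam_rx_ep_pipe_index	= 0,
 *	};
 */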
2862
2863static int __init msm_serial_hs_init(void)
2864{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002865 int ret;
2866 int i;
Mayank Rana55046232011-03-07 10:28:42 +05302867
2868	/* Init all UARTs as non-configured */
2869 for (i = 0; i < UARTDM_NR; i++)
2870 q_uart_port[i].uport.type = PORT_UNKNOWN;
2871
Mayank Rana55046232011-03-07 10:28:42 +05302872 ret = uart_register_driver(&msm_hs_driver);
2873 if (unlikely(ret)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002874		printk(KERN_ERR "%s: uart_register_driver() failed\n", __func__);
2875 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05302876 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 debug_base = debugfs_create_dir("msm_serial_hs", NULL);
2878 if (IS_ERR_OR_NULL(debug_base))
2879 pr_info("msm_serial_hs: Cannot create debugfs dir\n");
Mayank Rana55046232011-03-07 10:28:42 +05302880
2881 ret = platform_driver_register(&msm_serial_hs_platform_driver);
2882 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002883		printk(KERN_ERR "%s: platform_driver_register() failed\n", __func__);
2884 debugfs_remove_recursive(debug_base);
2885 uart_unregister_driver(&msm_hs_driver);
2886 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05302887 }
2888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889 printk(KERN_INFO "msm_serial_hs module loaded\n");
Mayank Rana55046232011-03-07 10:28:42 +05302890 return ret;
2891}
Mayank Rana55046232011-03-07 10:28:42 +05302892
2893/*
2894 * Called by the upper layer when port is closed.
2895 * - Disables the port
2896 * - Unhook the ISR
2897 */
2898static void msm_hs_shutdown(struct uart_port *uport)
2899{
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302900 int ret;
2901 unsigned int data;
2902 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05302903 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana40836782012-11-16 14:45:47 +05302904 struct platform_device *pdev = to_platform_device(uport->dev);
2905 const struct msm_serial_hs_platform_data *pdata =
2906 pdev->dev.platform_data;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302907 struct msm_hs_tx *tx = &msm_uport->tx;
2908 struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05302909
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302910 if (msm_uport->tx.dma_in_flight) {
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302911 if (!is_blsp_uart(msm_uport)) {
2912 spin_lock_irqsave(&uport->lock, flags);
2913 /* disable UART TX interface to DM */
2914 data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
2915 data &= ~UARTDM_TX_DM_EN_BMSK;
2916 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
2917 /* turn OFF UART Transmitter */
2918 msm_hs_write(uport, UARTDM_CR_ADDR,
2919 UARTDM_CR_TX_DISABLE_BMSK);
2920 /* reset UART TX */
2921 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
2922 /* reset UART TX Error */
2923 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX_ERROR);
2924 msm_uport->tx.flush = FLUSH_STOP;
2925 spin_unlock_irqrestore(&uport->lock, flags);
2926 /* discard flush */
2927 msm_dmov_flush(msm_uport->dma_tx_channel, 0);
2928 ret = wait_event_timeout(msm_uport->tx.wait,
2929 msm_uport->tx.flush == FLUSH_SHUTDOWN, 100);
2930 if (!ret)
2931				pr_err("%s(): HSUART TX stalled.\n", __func__);
2932 } else {
2933 /* BAM Disconnect for TX */
2934 sps_disconnect(sps_pipe_handle);
2935 }
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302936 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002937 tasklet_kill(&msm_uport->tx.tlet);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302938 BUG_ON(msm_uport->rx.flush < FLUSH_STOP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002939 wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
2940 tasklet_kill(&msm_uport->rx.tlet);
2941 cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
Mayank Ranacb589d82012-03-01 11:50:03 +05302942 flush_workqueue(msm_uport->hsuart_wq);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002943 pm_runtime_disable(uport->dev);
2944 pm_runtime_set_suspended(uport->dev);
Mayank Rana55046232011-03-07 10:28:42 +05302945
2946 /* Disable the transmitter */
2947 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
2948 /* Disable the receiver */
2949 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);
2950
Mayank Rana55046232011-03-07 10:28:42 +05302951 msm_uport->imr_reg = 0;
2952 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002953 /*
2954	 * Complete all device writes before actually disabling uartclk.
2955	 * Hence the mb() is required here.
2956 */
2957 mb();
Mayank Rana88d49142013-01-16 17:28:53 +05302958
2959 /* Reset PNOC Bus Scaling */
2960 if (is_blsp_uart(msm_uport)) {
2961 ret = msm_bus_scale_client_update_request(
2962 msm_uport->bus_perf_client, 0);
2963 if (ret)
2964 pr_err("%s(): Failed to reset bus bw vote\n", __func__);
2965 }
2966
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002967 if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
Mayank Ranacb589d82012-03-01 11:50:03 +05302968 /* to balance clk_state */
2969 clk_disable_unprepare(msm_uport->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002970 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05302971 clk_disable_unprepare(msm_uport->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002972 wake_unlock(&msm_uport->dma_wake_lock);
2973 }
Mayank Rana55046232011-03-07 10:28:42 +05302974
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302975 msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
Mayank Rana55046232011-03-07 10:28:42 +05302976 dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
2977 UART_XMIT_SIZE, DMA_TO_DEVICE);
2978
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002979 if (use_low_power_wakeup(msm_uport))
2980 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
Mayank Rana55046232011-03-07 10:28:42 +05302981
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002982 /* Free the interrupt */
2983 free_irq(uport->irq, msm_uport);
2984 if (use_low_power_wakeup(msm_uport))
2985 free_irq(msm_uport->wakeup.irq, msm_uport);
Mayank Rana40836782012-11-16 14:45:47 +05302986
2987 if (pdata && pdata->gpio_config)
2988 if (pdata->gpio_config(0))
2989 dev_err(uport->dev, "GPIO config error\n");
Mayank Rana55046232011-03-07 10:28:42 +05302990}
2991
2992static void __exit msm_serial_hs_exit(void)
2993{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002994 printk(KERN_INFO "msm_serial_hs module removed\n");
Mayank Rana17e0e1a2012-04-07 02:10:33 +05302995 debugfs_remove_recursive(debug_base);
Mayank Rana55046232011-03-07 10:28:42 +05302996 platform_driver_unregister(&msm_serial_hs_platform_driver);
2997 uart_unregister_driver(&msm_hs_driver);
2998}
Mayank Rana55046232011-03-07 10:28:42 +05302999
Mayank Rana55046232011-03-07 10:28:42 +05303000static int msm_hs_runtime_idle(struct device *dev)
3001{
3002 /*
3003	 * Returning success from runtime idle causes runtime suspend
3004	 * to be called.
3005 */
3006 return 0;
3007}
3008
3009static int msm_hs_runtime_resume(struct device *dev)
3010{
3011	struct platform_device *pdev = to_platform_device(dev);
3013 struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
Mayank Rana55046232011-03-07 10:28:42 +05303014 msm_hs_request_clock_on(&msm_uport->uport);
3015 return 0;
3016}
3017
3018static int msm_hs_runtime_suspend(struct device *dev)
3019{
3020	struct platform_device *pdev = to_platform_device(dev);
3022 struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
Mayank Rana55046232011-03-07 10:28:42 +05303023 msm_hs_request_clock_off(&msm_uport->uport);
3024 return 0;
3025}
Mayank Rana55046232011-03-07 10:28:42 +05303026
3027static const struct dev_pm_ops msm_hs_dev_pm_ops = {
3028 .runtime_suspend = msm_hs_runtime_suspend,
3029 .runtime_resume = msm_hs_runtime_resume,
3030 .runtime_idle = msm_hs_runtime_idle,
3031};
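
/*
 * Illustrative sketch only: the runtime PM hooks above funnel into
 * msm_hs_request_clock_on()/msm_hs_request_clock_off(), which are also
 * what a client driver (for example a Bluetooth host driver) would call
 * around bursts of traffic when it manages the port's clocking directly.
 * How the client obtains the uart_port pointer is platform glue and is
 * assumed here; only the two clock calls below belong to this file.
 *
 *	struct uart_port *uport = example_get_hs_uart_port(0);	(hypothetical helper)
 *
 *	msm_hs_request_clock_on(uport);		keep the UART clocked
 *	... perform TX/RX ...
 *	msm_hs_request_clock_off(uport);	allow the clocks to be gated again
 */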
3032
Mayank Ranaff398d02012-12-18 10:22:50 +05303033static const struct of_device_id msm_hs_match_table[] = {
3034 { .compatible = "qcom,msm-hsuart-v14" },
3035 {}
3036};
3037
Mayank Rana55046232011-03-07 10:28:42 +05303038static struct platform_driver msm_serial_hs_platform_driver = {
Mayank Rana17e0e1a2012-04-07 02:10:33 +05303039 .probe = msm_hs_probe,
Mayank Rana55046232011-03-07 10:28:42 +05303040 .remove = __devexit_p(msm_hs_remove),
3041 .driver = {
3042 .name = "msm_serial_hs",
Mayank Rana55046232011-03-07 10:28:42 +05303043 .pm = &msm_hs_dev_pm_ops,
Mayank Ranaff398d02012-12-18 10:22:50 +05303044 .of_match_table = msm_hs_match_table,
Mayank Rana55046232011-03-07 10:28:42 +05303045 },
3046};
3047
3048static struct uart_driver msm_hs_driver = {
3049 .owner = THIS_MODULE,
3050 .driver_name = "msm_serial_hs",
3051 .dev_name = "ttyHS",
3052 .nr = UARTDM_NR,
3053 .cons = 0,
3054};
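
/*
 * Ports registered with this driver appear as /dev/ttyHS<line>, where
 * <line> defaults to pdev->id and can be overridden via pdata->userid
 * (see msm_hs_probe()).
 */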
3055
3056static struct uart_ops msm_hs_ops = {
3057 .tx_empty = msm_hs_tx_empty,
3058 .set_mctrl = msm_hs_set_mctrl_locked,
3059 .get_mctrl = msm_hs_get_mctrl_locked,
3060 .stop_tx = msm_hs_stop_tx_locked,
3061 .start_tx = msm_hs_start_tx_locked,
3062 .stop_rx = msm_hs_stop_rx_locked,
3063 .enable_ms = msm_hs_enable_ms_locked,
3064 .break_ctl = msm_hs_break_ctl,
3065 .startup = msm_hs_startup,
3066 .shutdown = msm_hs_shutdown,
3067 .set_termios = msm_hs_set_termios,
Mayank Rana55046232011-03-07 10:28:42 +05303068 .type = msm_hs_type,
3069 .config_port = msm_hs_config_port,
3070 .release_port = msm_hs_release_port,
3071 .request_port = msm_hs_request_port,
Saket Saurabhce394102012-10-29 19:51:28 +05303072 .flush_buffer = msm_hs_flush_buffer,
Mayank Rana55046232011-03-07 10:28:42 +05303073};
3074
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003075module_init(msm_serial_hs_init);
3076module_exit(msm_serial_hs_exit);
Mayank Rana55046232011-03-07 10:28:42 +05303077MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
3078MODULE_VERSION("1.2");
3079MODULE_LICENSE("GPL v2");