/* drivers/serial/msm_serial_hs.c
 *
 * MSM 7k High speed uart driver
 *
 * Copyright (c) 2008 Google Inc.
 * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
 * Modified: Nick Pelly <npelly@google.com>
 *
 * All source code in this file is licensed under the following license
 * except where indicated.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * Has optional support for uart power management independent of linux
 * suspend/resume:
 *
 * RX wakeup.
 * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
 * UART RX pin). This should only be used if there is not a wakeup
 * GPIO on the UART CTS, and the first RX byte is known (for example, with the
 * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
 * always be lost. RTS will be asserted even while the UART is off in this mode
 * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
 */

#include <linux/module.h>

#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/tty_flip.h>
#include <linux/wait.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <asm/atomic.h>
#include <asm/irq.h>

#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/sps.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_bus.h>

#include "msm_serial_hs_hwreg.h"
#define UART_SPS_CONS_PERIPHERAL 0
#define UART_SPS_PROD_PERIPHERAL 1

static int hs_serial_debug_mask = 1;
module_param_named(debug_mask, hs_serial_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
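/*
 * Runtime control of debug logging is available through the module
 * parameter created above. Illustrative usage (assumes the module is
 * named msm_serial_hs and sysfs is mounted in the usual place):
 *   echo 0 > /sys/module/msm_serial_hs/parameters/debug_mask
 */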
/*
 * There are 3 different kinds of UART core available on MSM:
 * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
 * and BLSP based HSUART.
 */
enum uart_core_type {
	LEGACY_HSUART,
	GSBI_HSUART,
	BLSP_HSUART,
};

enum flush_reason {
	FLUSH_NONE,
	FLUSH_DATA_READY,
	FLUSH_DATA_INVALID,  /* values after this indicate invalid data */
	FLUSH_IGNORE = FLUSH_DATA_INVALID,
	FLUSH_STOP,
	FLUSH_SHUTDOWN,
};

enum msm_hs_clk_states_e {
	MSM_HS_CLK_PORT_OFF,     /* port not in use */
	MSM_HS_CLK_OFF,          /* clock disabled */
	MSM_HS_CLK_REQUEST_OFF,  /* disable after TX and RX flushed */
	MSM_HS_CLK_ON,           /* clock enabled */
};

/* Track the forced RXSTALE flush during clock off sequence.
 * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
enum msm_hs_clk_req_off_state_e {
	CLK_REQ_OFF_START,
	CLK_REQ_OFF_RXSTALE_ISSUED,
	CLK_REQ_OFF_FLUSH_ISSUED,
	CLK_REQ_OFF_RXSTALE_FLUSHED,
};

/* SPS data structures to support HSUART with BAM
 * @sps_pipe - This struct defines a BAM pipe descriptor
 * @sps_connect - This struct defines a connection's end point
 * @sps_register - This struct defines event registration parameters
 */
struct msm_hs_sps_ep_conn_data {
	struct sps_pipe *pipe_handle;
	struct sps_connect config;
	struct sps_register_event event;
};

struct msm_hs_tx {
	unsigned int tx_ready_int_en;  /* ok to dma more tx */
	unsigned int dma_in_flight;    /* tx dma in progress */
	enum flush_reason flush;
	wait_queue_head_t wait;
	struct msm_dmov_cmd xfer;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	dma_addr_t mapped_cmd_ptr_ptr;
	int tx_count;
	dma_addr_t dma_base;
	struct tasklet_struct tlet;
	struct msm_hs_sps_ep_conn_data cons;
};

struct msm_hs_rx {
	enum flush_reason flush;
	struct msm_dmov_cmd xfer;
	dma_addr_t cmdptr_dmaaddr;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	wait_queue_head_t wait;
	dma_addr_t rbuffer;
	unsigned char *buffer;
	unsigned int buffer_pending;
	struct dma_pool *pool;
	struct wake_lock wake_lock;
	struct delayed_work flip_insert_work;
	struct tasklet_struct tlet;
	struct msm_hs_sps_ep_conn_data prod;
};

enum buffer_states {
	NONE_PENDING = 0x0,
	FIFO_OVERRUN = 0x1,
	PARITY_ERROR = 0x2,
	CHARS_NORMAL = 0x4,
};

/* optional low power wakeup, typically on a GPIO RX irq */
struct msm_hs_wakeup {
	int irq;               /* < 0 indicates low power wakeup disabled */
	unsigned char ignore;  /* bool */

	/* bool: inject char into rx tty on wakeup */
	unsigned char inject_rx;
	char rx_to_inject;
};

struct msm_hs_port {
	struct uart_port uport;
	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
	struct clk *clk;
	struct clk *pclk;
	struct msm_hs_tx tx;
	struct msm_hs_rx rx;
	/* gsbi uarts have to do additional writes to the gsbi memory */
	/* block and top control status block. The following pointer */
	/* keeps a handle to these blocks. */
	unsigned char __iomem *mapped_gsbi;
	int dma_tx_channel;
	int dma_rx_channel;
	int dma_tx_crci;
	int dma_rx_crci;
	struct hrtimer clk_off_timer;  /* to poll TXEMT before clock off */
	ktime_t clk_off_delay;
	enum msm_hs_clk_states_e clk_state;
	enum msm_hs_clk_req_off_state_e clk_req_off_state;

	struct msm_hs_wakeup wakeup;
	struct wake_lock dma_wake_lock;  /* held while any DMA active */

	struct dentry *loopback_dir;
	struct work_struct clock_off_w; /* work for actual clock off */
	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
	struct mutex clk_mutex; /* mutex to guard against clock off/clock on */
	struct work_struct disconnect_rx_endpoint; /* disconnect rx_endpoint */
	bool tty_flush_receive;
	enum uart_core_type uart_type;
	u32 bam_handle;
	resource_size_t bam_mem;
	int bam_irq;
	unsigned char __iomem *bam_base;
	unsigned int bam_tx_ep_pipe_index;
	unsigned int bam_rx_ep_pipe_index;
	/* struct sps_event_notify is an argument passed when triggering a
	 * callback event object registered for an SPS connection end point.
	 */
	struct sps_event_notify notify;
	/* bus client handler */
	u32 bus_perf_client;
	/* bus scaling data required for BLSP UART */
	struct msm_bus_scale_pdata *bus_scale_table;
	bool rx_discard_flush_issued;
	int rx_count_callback;
};

#define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */
#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512
#define RETRY_TIMEOUT 5
#define UARTDM_NR 256
#define BAM_PIPE_MIN 0
#define BAM_PIPE_MAX 11
#define BUS_SCALING 1
#define BUS_RESET 0
#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
#define BLSP_UART_CLK_FMAX 63160000

static struct dentry *debug_base;
static struct msm_hs_port q_uart_port[UARTDM_NR];
static struct platform_driver msm_serial_hs_platform_driver;
static struct uart_driver msm_hs_driver;
static struct uart_ops msm_hs_ops;
static void msm_hs_start_rx_locked(struct uart_port *uport);
static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr);
static void flip_insert_work(struct work_struct *work);

#define UARTDM_TO_MSM(uart_port) \
	container_of((uart_port), struct msm_hs_port, uport)

static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	int state = 1;
	enum msm_hs_clk_states_e clk_state;
	unsigned long flags;

	struct platform_device *pdev = container_of(dev, struct
						    platform_device, dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	clk_state = msm_uport->clk_state;
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);

	if (clk_state <= MSM_HS_CLK_OFF)
		state = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", state);
}

static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int state;
	struct platform_device *pdev = container_of(dev, struct
						    platform_device, dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	state = buf[0] - '0';
	switch (state) {
	case 0: {
		msm_hs_request_clock_off(&msm_uport->uport);
		break;
	}
	case 1: {
		msm_hs_request_clock_on(&msm_uport->uport);
		break;
	}
	default: {
		return -EINVAL;
	}
	}
	return count;
}

static DEVICE_ATTR(clock, S_IWUSR | S_IRUGO, show_clock, set_clock);
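
/*
 * Userspace can request the UART clock on or off through the "clock" sysfs
 * attribute defined above. Illustrative usage only; the exact device path is
 * an assumption based on the platform device name and id:
 *   echo 1 > /sys/devices/platform/msm_serial_hs.0/clock   (request clock on)
 *   echo 0 > /sys/devices/platform/msm_serial_hs.0/clock   (request clock off)
 */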

static inline unsigned int use_low_power_wakeup(struct msm_hs_port *msm_uport)
{
	return (msm_uport->wakeup.irq > 0);
}

static inline int is_gsbi_uart(struct msm_hs_port *msm_uport)
{
	/* assume gsbi uart if gsbi resource found in pdata */
	return (msm_uport->mapped_gsbi != NULL);
}

static unsigned int is_blsp_uart(struct msm_hs_port *msm_uport)
{
	return (msm_uport->uart_type == BLSP_HSUART);
}

static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
{
	int ret;

	if (is_blsp_uart(msm_uport) && msm_uport->bus_perf_client) {
		pr_debug("Bus voting:%d\n", vote);
		ret = msm_bus_scale_client_update_request(
				msm_uport->bus_perf_client, vote);
		if (ret)
			pr_err("%s(): Failed for Bus voting: %d\n",
			       __func__, vote);
	}
}

static inline unsigned int msm_hs_read(struct uart_port *uport,
				       unsigned int offset)
{
	return readl_relaxed(uport->membase + offset);
}

static inline void msm_hs_write(struct uart_port *uport, unsigned int offset,
				unsigned int value)
{
	writel_relaxed(value, uport->membase + offset);
}

static void msm_hs_release_port(struct uart_port *port)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *gsbi_resource;
	resource_size_t size;

	if (is_gsbi_uart(msm_uport)) {
		iowrite32(GSBI_PROTOCOL_IDLE, msm_uport->mapped_gsbi +
			  GSBI_CONTROL_ADDR);
		gsbi_resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     "gsbi_resource");
		if (unlikely(!gsbi_resource))
			return;

		size = resource_size(gsbi_resource);
		release_mem_region(gsbi_resource->start, size);
		iounmap(msm_uport->mapped_gsbi);
		msm_uport->mapped_gsbi = NULL;
	}
}

static int msm_hs_request_port(struct uart_port *port)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *gsbi_resource;
	resource_size_t size;

	gsbi_resource = platform_get_resource_byname(pdev,
						     IORESOURCE_MEM,
						     "gsbi_resource");
	if (gsbi_resource) {
		size = resource_size(gsbi_resource);
		if (unlikely(!request_mem_region(gsbi_resource->start, size,
						 "msm_serial_hs")))
			return -EBUSY;
		msm_uport->mapped_gsbi = ioremap(gsbi_resource->start,
						 size);
		if (!msm_uport->mapped_gsbi) {
			release_mem_region(gsbi_resource->start, size);
			return -EBUSY;
		}
	}
	/* no gsbi uart */
	return 0;
}

static int msm_serial_loopback_enable_set(void *data, u64 val)
{
	struct msm_hs_port *msm_uport = data;
	struct uart_port *uport = &(msm_uport->uport);
	unsigned long flags;
	int ret = 0;

	msm_hs_bus_voting(msm_uport, BUS_SCALING);

	clk_prepare_enable(msm_uport->clk);
	if (msm_uport->pclk)
		clk_prepare_enable(msm_uport->pclk);

	if (val) {
		spin_lock_irqsave(&uport->lock, flags);
		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
		if (is_blsp_uart(msm_uport))
			ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
				UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
		else
			ret |= UARTDM_MR2_LOOP_MODE_BMSK;
		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
		spin_unlock_irqrestore(&uport->lock, flags);
	} else {
		spin_lock_irqsave(&uport->lock, flags);
		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
		if (is_blsp_uart(msm_uport))
			ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
				 UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
		else
			ret &= ~UARTDM_MR2_LOOP_MODE_BMSK;
		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
		spin_unlock_irqrestore(&uport->lock, flags);
	}
	/* Calling CLOCK API. Hence an mb() is required here. */
	mb();
	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);

	msm_hs_bus_voting(msm_uport, BUS_RESET);
	return 0;
}

static int msm_serial_loopback_enable_get(void *data, u64 *val)
{
	struct msm_hs_port *msm_uport = data;
	struct uart_port *uport = &(msm_uport->uport);
	unsigned long flags;
	int ret = 0;

	msm_hs_bus_voting(msm_uport, BUS_SCALING);

	clk_prepare_enable(msm_uport->clk);
	if (msm_uport->pclk)
		clk_prepare_enable(msm_uport->pclk);

	spin_lock_irqsave(&uport->lock, flags);
	ret = msm_hs_read(&msm_uport->uport, UARTDM_MR2_ADDR);
	spin_unlock_irqrestore(&uport->lock, flags);

	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);

	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;

	msm_hs_bus_voting(msm_uport, BUS_RESET);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
			msm_serial_loopback_enable_set, "%llu\n");

/*
 * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
 * writing 1 turns on internal loopback mode in HW. Useful for automation
 * test scripts.
 * writing 0 disables the internal loopback mode. Default is disabled.
 */
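/*
 * Illustrative usage of the loopback node (assumes debugfs is mounted at
 * /sys/kernel/debug and port id 0):
 *   echo 1 > /sys/kernel/debug/msm_serial_hs/loopback.0
 *   cat /sys/kernel/debug/msm_serial_hs/loopback.0
 */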
static void __devinit msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
					      int id)
{
	char node_name[15];
	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
	msm_uport->loopback_dir = debugfs_create_file(node_name,
						      S_IRUGO | S_IWUSR,
						      debug_base,
						      msm_uport,
						      &loopback_enable_fops);

	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
		pr_err("%s(): Cannot create loopback.%d debug entry",
		       __func__, id);
}

static int __devexit msm_hs_remove(struct platform_device *pdev)
{
	struct msm_hs_port *msm_uport;
	struct device *dev;

	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
		printk(KERN_ERR "Invalid platform device ID = %d\n", pdev->id);
		return -EINVAL;
	}

	msm_uport = &q_uart_port[pdev->id];
	dev = msm_uport->uport.dev;

	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
	debugfs_remove(msm_uport->loopback_dir);

	dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);
	dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
		      msm_uport->rx.rbuffer);
	dma_pool_destroy(msm_uport->rx.pool);

	dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);

	wake_lock_destroy(&msm_uport->rx.wake_lock);
	wake_lock_destroy(&msm_uport->dma_wake_lock);
	destroy_workqueue(msm_uport->hsuart_wq);
	mutex_destroy(&msm_uport->clk_mutex);

	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
	clk_put(msm_uport->clk);
	if (msm_uport->pclk)
		clk_put(msm_uport->pclk);

	/* Free the tx resources */
	kfree(msm_uport->tx.command_ptr);
	kfree(msm_uport->tx.command_ptr_ptr);

	/* Free the rx resources */
	kfree(msm_uport->rx.command_ptr);
	kfree(msm_uport->rx.command_ptr_ptr);

	iounmap(msm_uport->uport.membase);

	return 0;
}

static int msm_hs_init_clk(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Set up the MREG/NREG/DREG/MNDREG */
	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
	if (ret) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		return ret;
	}

	ret = clk_prepare_enable(msm_uport->clk);
	if (ret) {
		printk(KERN_ERR "Error could not turn on UART clk\n");
		return ret;
	}
	if (msm_uport->pclk) {
		ret = clk_prepare_enable(msm_uport->pclk);
		if (ret) {
			clk_disable_unprepare(msm_uport->clk);
			dev_err(uport->dev,
				"Error could not turn on UART pclk\n");
			return ret;
		}
	}

	msm_uport->clk_state = MSM_HS_CLK_ON;
	return 0;
}

/* Connect a UART peripheral's SPS endpoint (consumer endpoint)
 *
 * Also registers an SPS callback function for the consumer
 * process with the SPS driver
 *
 * @uport - Pointer to uart uport structure
 *
 * @return - 0 if successful else negative value.
 *
 */

static int msm_hs_spsconnect_tx(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
	struct sps_connect *sps_config = &tx->cons.config;
	struct sps_register_event *sps_event = &tx->cons.event;

	/* Establish connection between peripheral and memory endpoint */
	ret = sps_connect(sps_pipe_handle, sps_config);
	if (ret) {
		pr_err("msm_serial_hs: sps_connect() failed for tx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		return ret;
	}
	/* Register callback event for EOT (End of transfer) event. */
	ret = sps_register_event(sps_pipe_handle, sps_event);
	if (ret) {
		pr_err("msm_serial_hs: sps_register_event() failed for tx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		goto reg_event_err;
	}
	return 0;

reg_event_err:
	sps_disconnect(sps_pipe_handle);
	return ret;
}

/* Connect a UART peripheral's SPS endpoint (producer endpoint)
 *
 * Also registers an SPS callback function for the producer
 * process with the SPS driver
 *
 * @uport - Pointer to uart uport structure
 *
 * @return - 0 if successful else negative value.
 *
 */

static int msm_hs_spsconnect_rx(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
	struct sps_connect *sps_config = &rx->prod.config;
	struct sps_register_event *sps_event = &rx->prod.event;

	/* Establish connection between peripheral and memory endpoint */
	ret = sps_connect(sps_pipe_handle, sps_config);
	if (ret) {
		pr_err("msm_serial_hs: sps_connect() failed for rx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		return ret;
	}
	/* Register callback event for DESC_DONE event. */
	ret = sps_register_event(sps_pipe_handle, sps_event);
	if (ret) {
		pr_err("msm_serial_hs: sps_register_event() failed for rx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		goto reg_event_err;
	}
	return 0;

reg_event_err:
	sps_disconnect(sps_pipe_handle);
	return ret;
}

/*
 * Program the UARTDM_CSR register with the correct bit rate.
 *
 * Interrupts should be disabled before we are called, as
 * we modify the baud rate and
 * set the receive stale interrupt level, dependent on the bit rate.
 * The goal is to have around 8 ms before we indicate stale:
 * roundup(((bit rate * .008) / 10) + 1)
 */
static void msm_hs_set_bps_locked(struct uart_port *uport,
				  unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	switch (bps) {
	case 300:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x00);
		rxstale = 1;
		break;
	case 600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x11);
		rxstale = 1;
		break;
	case 1200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x22);
		rxstale = 1;
		break;
	case 2400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x33);
		rxstale = 1;
		break;
	case 4800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x44);
		rxstale = 1;
		break;
	case 9600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x55);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x66);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x77);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x88);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
		rxstale = 16;
		break;
	case 76800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
		rxstale = 31;
		break;
	case 230400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
		rxstale = 31;
		break;
	case 460800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	case 4000000:
	case 3686400:
	case 3200000:
	case 3500000:
	case 3000000:
	case 2500000:
	case 1500000:
	case 1152000:
	case 1000000:
	case 921600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}
	/*
	 * The uart baud rate depends on the CSR and MND values.
	 * We update CSR here and then call clk_set_rate, which
	 * updates the MND values. Hence a dsb is required here.
	 */
	mb();
	if (bps > 460800) {
		uport->uartclk = bps * 16;
		if (is_blsp_uart(msm_uport)) {
			/* BLSP based UART supports a maximum clock frequency
			 * of 63.16 MHz. With this (63.16 MHz) clock frequency
			 * the UART can support a baud rate of 3.94 Mbps which
			 * is equivalent to 4 Mbps.
			 * UART hardware is robust enough to handle this
			 * deviation to achieve a baud rate of ~4 Mbps.
			 */
			if (bps == 4000000)
				uport->uartclk = BLSP_UART_CLK_FMAX;
		}
	} else {
		uport->uartclk = 7372800;
	}

	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		WARN_ON(1);
	}

	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
	/*
	 * It is suggested to do a reset of the transmitter and receiver after
	 * changing any protocol configuration. Here the baud rate and stale
	 * timeout are getting updated. Hence reset the transmitter and
	 * receiver.
	 */
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
}

static void msm_hs_set_std_bps_locked(struct uart_port *uport,
				      unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;

	switch (bps) {
	case 9600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xdd);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}

	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
}

/*
 * termios : new ktermios
 * oldtermios: old ktermios previous setting
 *
 * Configure the serial port
 */
static void msm_hs_set_termios(struct uart_port *uport,
			       struct ktermios *termios,
			       struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	int ret;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	mutex_lock(&msm_uport->clk_mutex);
	msm_hs_write(uport, UARTDM_IMR_ADDR, 0);

	/*
	 * Disable the Rx channel of UARTDM.
	 * A DMA Rx stall happens if enqueue and flush of an Rx command happen
	 * concurrently. Hence, before changing the baud rate/protocol
	 * configuration and sending a flush command to the ADM, disable the
	 * Rx channel of UARTDM.
	 * Note: the receiver should not be reset here immediately as it is
	 * not suggested to do disable/reset or reset/disable at the same
	 * time.
	 */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport)) {
		/* Disable UARTDM RX BAM Interface */
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	} else {
		data &= ~UARTDM_RX_DM_EN_BMSK;
	}

	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* 300 is the minimum baud rate supported by the driver */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);

	/* Temporary remapping 200 BAUD to 3.2 mbps */
	if (bps == 200)
		bps = 3200000;

	uport->uartclk = clk_get_rate(msm_uport->clk);
	if (!uport->uartclk)
		msm_hs_set_std_bps_locked(uport, bps);
	else
		msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UARTDM_MR2_ADDR);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (PARENB == (c_cflag & PARENB)) {
		if (PARODD == (c_cflag & PARODD)) {
			data |= ODD_PARITY;
		} else if (CMSPAR == (c_cflag & CMSPAR)) {
			data |= SPACE_PARITY;
		} else {
			data |= EVEN_PARITY;
		}
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;

	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UARTDM_MR2_ADDR, data);

	/* Configure HW flow control */
	data = msm_hs_read(uport, UARTDM_MR1_ADDR);

	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);

	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
	}

	msm_hs_write(uport, UARTDM_MR1_ADDR, data);

	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;

	uport->read_status_mask = (termios->c_cflag & CREAD);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);

	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		msm_uport->rx.flush = FLUSH_IGNORE;
		/*
		 * Before using dmov APIs, make sure that the
		 * previous writel calls are completed. Hence
		 * a dsb is required here.
		 */
		mb();
		if (is_blsp_uart(msm_uport)) {
			ret = sps_disconnect(sps_pipe_handle);
			if (ret)
				pr_err("%s(): sps_disconnect failed\n",
				       __func__);
			msm_hs_spsconnect_rx(uport);
			msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
		} else {
			msm_uport->rx_discard_flush_issued = true;
			/* do discard flush */
			msm_dmov_flush(msm_uport->dma_rx_channel, 0);
			pr_debug("%s(): waiting for flush completion.\n",
				 __func__);
			ret = wait_event_timeout(msm_uport->rx.wait,
				msm_uport->rx_discard_flush_issued == false,
				RX_FLUSH_COMPLETE_TIMEOUT);
			if (!ret)
				pr_err("%s(): Discard flush pending.\n",
				       __func__);
		}
	}

	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	mb();
	mutex_unlock(&msm_uport->clk_mutex);
}

/*
 * Standard API, Transmitter
 * Any character in the transmit shift register is sent
 */
unsigned int msm_hs_tx_empty(struct uart_port *uport)
{
	unsigned int data;
	unsigned int ret = 0;

	data = msm_hs_read(uport, UARTDM_SR_ADDR);
	if (data & UARTDM_SR_TXEMT_BMSK)
		ret = TIOCSER_TEMT;

	return ret;
}
EXPORT_SYMBOL(msm_hs_tx_empty);

/*
 * Standard API, Stop transmitter.
 * Any character in the transmit shift register is sent, as
 * is the current data mover transfer.
 */
static void msm_hs_stop_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	msm_uport->tx.tx_ready_int_en = 0;
}

/* Disconnect the BAM RX endpoint pipe index from workqueue context */
static void hsuart_disconnect_rx_endpoint_work(struct work_struct *w)
{
	struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
						     disconnect_rx_endpoint);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
	int ret = 0;

	ret = sps_disconnect(sps_pipe_handle);
	if (ret)
		pr_err("%s(): sps_disconnect failed\n", __func__);

	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	msm_uport->rx.flush = FLUSH_SHUTDOWN;
	wake_up(&msm_uport->rx.wait);
}

/*
 * Standard API, Stop receiver as soon as possible.
 *
 * The function immediately terminates the operation of the
 * channel receiver and any incoming characters are lost. None
 * of the receiver status bits are affected by this command and
 * characters that are already in the receive FIFO remain there.
 */
static void msm_hs_stop_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int data;

	/* disable dlink */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport))
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	else
		data &= ~UARTDM_RX_DM_EN_BMSK;
	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* calling DMOV or CLOCK API. Hence mb() */
	mb();
	/* Disable the receiver */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		if (is_blsp_uart(msm_uport)) {
			msm_uport->rx.flush = FLUSH_STOP;
			/* workqueue for BAM rx endpoint disconnect */
			queue_work(msm_uport->hsuart_wq,
				   &msm_uport->disconnect_rx_endpoint);
		} else {
			/* do discard flush */
			msm_dmov_flush(msm_uport->dma_rx_channel, 0);
		}
	}
	if (!is_blsp_uart(msm_uport) && msm_uport->rx.flush != FLUSH_SHUTDOWN)
		msm_uport->rx.flush = FLUSH_STOP;
}


/* Transmit the next chunk of data */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	int aligned_tx_count;
	dma_addr_t src_addr;
	dma_addr_t aligned_src_addr;
	u32 flags = SPS_IOVEC_FLAG_EOT;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
	struct sps_pipe *sps_pipe_handle;

	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		msm_hs_stop_tx_locked(uport);
		return;
	}

	tx->dma_in_flight = 1;

	tx_count = uart_circ_chars_pending(tx_buf);

	if (UARTDM_TX_BUF_SIZE < tx_count)
		tx_count = UARTDM_TX_BUF_SIZE;

	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	/* Mask src_addr to align it on a cache line boundary
	 * and add the masked-off bytes to tx_count */
	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
	aligned_tx_count = tx_count + src_addr - aligned_src_addr;

	dma_sync_single_for_device(uport->dev, aligned_src_addr,
				   aligned_tx_count, DMA_TO_DEVICE);

	if (is_blsp_uart(msm_uport)) {
		/* Issue TX BAM Start IFC command */
		msm_hs_write(uport, UARTDM_CR_ADDR, START_TX_BAM_IFC);
	} else {
		tx->command_ptr->num_rows =
				(((tx_count + 15) >> 4) << 16) |
				((tx_count + 15) >> 4);
		tx->command_ptr->src_row_addr = src_addr;

		dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr,
					   sizeof(dmov_box), DMA_TO_DEVICE);

		*tx->command_ptr_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
	}

	/* Save tx_count to use in Callback */
	tx->tx_count = tx_count;
	msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);

	/* Disable the tx_ready interrupt */
	msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	msm_uport->tx.flush = FLUSH_NONE;

	if (is_blsp_uart(msm_uport)) {
		sps_pipe_handle = tx->cons.pipe_handle;
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
				 msm_uport, flags);
	} else {
		dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
					   sizeof(u32), DMA_TO_DEVICE);

		msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
	}
}

/* Start to receive the next chunk of data */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle;
	u32 flags = SPS_IOVEC_FLAG_INT;
	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
	unsigned int data;

	msm_uport->rx.buffer_pending = 0;
	if (buffer_pending && hs_serial_debug_mask)
		printk(KERN_ERR "Error: rx started in buffer state = %x",
		       buffer_pending);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
	msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
	msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;

	/*
	 * Enable the UARTDM Rx Interface as it has previously been
	 * disabled in set_termios before configuring the baud rate.
	 */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport)) {
		/* Enable UARTDM Rx BAM Interface */
		data |= UARTDM_RX_BAM_ENABLE_BMSK;
	} else {
		data |= UARTDM_RX_DM_EN_BMSK;
	}

	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	if (is_blsp_uart(msm_uport)) {
		/*
		 * RX-transfer will be automatically re-activated
		 * after the last data of the previous transfer was read.
		 */
		data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
			RX_DMRX_CYCLIC_EN);
		msm_hs_write(uport, UARTDM_RX_TRANS_CTRL_ADDR, data);
		/* Issue RX BAM Start IFC command */
		msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
		mb();
	}

	msm_uport->rx.flush = FLUSH_NONE;

	if (is_blsp_uart(msm_uport)) {
		sps_pipe_handle = rx->prod.pipe_handle;
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, rx->rbuffer,
				 UARTDM_RX_BUF_SIZE, msm_uport, flags);
	} else {
		msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel,
				     &msm_uport->rx.xfer);
	}
}

static void flip_insert_work(struct work_struct *work)
{
	unsigned long flags;
	int retval;
	struct msm_hs_port *msm_uport =
		container_of(work, struct msm_hs_port,
			     rx.flip_insert_work.work);
	struct tty_struct *tty = msm_uport->uport.state->port.tty;

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
		if (hs_serial_debug_mask)
			printk(KERN_ERR "Error: No buffer pending in %s",
			       __func__);
		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
		return;
	}
	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		if (retval)
			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
	}
	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
		retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
		if (retval)
			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
	}
	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
		int rx_count, rx_offset;
		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer +
						rx_offset, rx_count);
		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
						 PARITY_ERROR);
		if (retval != rx_count)
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 8 | (rx_count - retval) << 16;
	}
	if (msm_uport->rx.buffer_pending)
		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
				      msecs_to_jiffies(RETRY_TIMEOUT));
	else
		if ((msm_uport->clk_state == MSM_HS_CLK_ON) &&
		    (msm_uport->rx.flush <= FLUSH_IGNORE)) {
			if (hs_serial_debug_mask)
				printk(KERN_WARNING
				       "msm_serial_hs: Pending buffers cleared. Restarting\n");
			msm_hs_start_rx_locked(&msm_uport->uport);
		}
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
	tty_flip_buffer_push(tty);
}

static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr)
{
	int retval;
	int rx_count;
	unsigned long status;
	unsigned long flags;
	unsigned int error_f = 0;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;
	unsigned int flush;
	struct tty_struct *tty;
	struct sps_event_notify *notify;
	struct msm_hs_rx *rx;
	struct sps_pipe *sps_pipe_handle;
	u32 sps_flags = SPS_IOVEC_FLAG_INT;

	msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
				 struct msm_hs_port, rx.tlet);
	uport = &msm_uport->uport;
	tty = uport->state->port.tty;
	notify = &msm_uport->notify;
	rx = &msm_uport->rx;

	status = msm_hs_read(uport, UARTDM_SR_ADDR);

	spin_lock_irqsave(&uport->lock, flags);

	if (!is_blsp_uart(msm_uport))
		msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);

	/* an overrun is not connected to data in the FIFO */
	if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
		     (uport->read_status_mask & CREAD))) {
		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		if (!retval)
			msm_uport->rx.buffer_pending |= TTY_OVERRUN;
		uport->icount.buf_overrun++;
		error_f = 1;
	}

	if (!(uport->ignore_status_mask & INPCK))
		status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);

	if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
		/* Cannot tell the difference between a parity & frame error */
		if (hs_serial_debug_mask)
			printk(KERN_WARNING "msm_serial_hs: parity error\n");
		uport->icount.parity++;
		error_f = 1;
		if (!(uport->ignore_status_mask & IGNPAR)) {
			retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_PARITY;
		}
	}

	if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
		if (hs_serial_debug_mask)
			printk(KERN_WARNING "msm_serial_hs: Rx break\n");
		uport->icount.brk++;
		error_f = 1;
		if (!(uport->ignore_status_mask & IGNBRK)) {
			retval = tty_insert_flip_char(tty, 0, TTY_BREAK);
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_BREAK;
		}
	}

	if (error_f)
		msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);

	if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
	flush = msm_uport->rx.flush;
	if (flush == FLUSH_IGNORE)
		if (!msm_uport->rx.buffer_pending)
			msm_hs_start_rx_locked(uport);

	if (flush == FLUSH_STOP) {
		msm_uport->rx.flush = FLUSH_SHUTDOWN;
		wake_up(&msm_uport->rx.wait);
	}
	if (flush >= FLUSH_DATA_INVALID)
		goto out;

	if (is_blsp_uart(msm_uport)) {
		rx_count = msm_uport->rx_count_callback;
	} else {
		rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
		/* order the read of rx.buffer */
		rmb();
	}

	if (0 != (uport->read_status_mask & CREAD)) {
		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
						rx_count);
		if (retval != rx_count) {
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 5 | (rx_count - retval) << 16;
		}
	}

	/* order the read of rx.buffer and the start of next rx xfer */
	wmb();

	if (!msm_uport->rx.buffer_pending) {
		if (is_blsp_uart(msm_uport)) {
			msm_uport->rx.flush = FLUSH_NONE;
			sps_pipe_handle = rx->prod.pipe_handle;
			/* Queue transfer request to SPS */
			sps_transfer_one(sps_pipe_handle, rx->rbuffer,
				UARTDM_RX_BUF_SIZE, msm_uport, sps_flags);
		} else {
			msm_hs_start_rx_locked(uport);
		}
	}
out:
	if (msm_uport->rx.buffer_pending) {
		if (hs_serial_debug_mask)
			printk(KERN_WARNING
			       "msm_serial_hs: tty buffer exhausted. Stalling\n");
		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
				      msecs_to_jiffies(RETRY_TIMEOUT));
	}
	/* release the wakelock in 500ms, not immediately, because higher
	 * layers don't always take wakelocks when they should */
	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
	spin_unlock_irqrestore(&uport->lock, flags);
	if (flush < FLUSH_DATA_INVALID)
		tty_flip_buffer_push(tty);
}
1414
 1415/* Enable the transmitter interrupt */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001416static void msm_hs_start_tx_locked(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301417{
1418 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1419
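	/*
	 * Mark that a TX-ready completion is wanted and, if no DMA is in
	 * flight, submit a transfer now.  If a transfer is already in flight,
	 * the TX-ready handling in msm_hs_isr() submits the next one when the
	 * current transfer completes.
	 */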
Mayank Rana55046232011-03-07 10:28:42 +05301420 if (msm_uport->tx.tx_ready_int_en == 0) {
1421 msm_uport->tx.tx_ready_int_en = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001422 if (msm_uport->tx.dma_in_flight == 0)
1423 msm_hs_submit_tx_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301424 }
Mayank Rana55046232011-03-07 10:28:42 +05301425}
1426
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301427/**
1428 * Callback notification from SPS driver
1429 *
 1430 * This callback function is called from the SPS driver
 1431 * when the requested SPS data transfer has
 1432 * completed.
1433 *
1434 */
1435
1436static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
1437{
1438 struct msm_hs_port *msm_uport =
1439 (struct msm_hs_port *)
1440 ((struct sps_event_notify *)notify)->user;
1441
1442 msm_uport->notify = *notify;
1443 pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
1444 __func__, notify->event_id,
1445 notify->data.transfer.iovec.addr,
1446 notify->data.transfer.iovec.size,
1447 notify->data.transfer.iovec.flags);
1448
1449 tasklet_schedule(&msm_uport->tx.tlet);
1450}
1451
Mayank Rana55046232011-03-07 10:28:42 +05301452/*
1453 * This routine is called when we are done with a DMA transfer
1454 *
1455 * This routine is registered with Data mover when we set
1456 * up a Data Mover transfer. It is called from Data mover ISR
1457 * when the DMA transfer is done.
1458 */
1459static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr,
1460 unsigned int result,
1461 struct msm_dmov_errdata *err)
1462{
Mayank Rana55046232011-03-07 10:28:42 +05301463 struct msm_hs_port *msm_uport;
1464
Mayank Rana55046232011-03-07 10:28:42 +05301465 msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05301466 if (msm_uport->tx.flush == FLUSH_STOP)
 1467		/* DMA FLUSH unsuccessful */
1468 WARN_ON(!(result & DMOV_RSLT_FLUSH));
1469 else
1470 /* DMA did not finish properly */
1471 WARN_ON(!(result & DMOV_RSLT_DONE));
Mayank Rana55046232011-03-07 10:28:42 +05301472
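	/* Hand the rest of the TX completion work to the TX tasklet. */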
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001473 tasklet_schedule(&msm_uport->tx.tlet);
1474}
1475
1476static void msm_serial_hs_tx_tlet(unsigned long tlet_ptr)
1477{
1478 unsigned long flags;
1479 struct msm_hs_port *msm_uport = container_of((struct tasklet_struct *)
1480 tlet_ptr, struct msm_hs_port, tx.tlet);
1481
1482 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05301483 if (msm_uport->tx.flush == FLUSH_STOP) {
1484 msm_uport->tx.flush = FLUSH_SHUTDOWN;
1485 wake_up(&msm_uport->tx.wait);
1486 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
1487 return;
1488 }
Mayank Rana55046232011-03-07 10:28:42 +05301489
1490 msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001491 msm_hs_write(&(msm_uport->uport), UARTDM_IMR_ADDR, msm_uport->imr_reg);
 1492	/* Calling clk API. Hence mb() is required. */
1493 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301494
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001495 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
Mayank Rana55046232011-03-07 10:28:42 +05301496}
1497
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301498/**
1499 * Callback notification from SPS driver
1500 *
 1501 * This callback function is called from the SPS driver
 1502 * when the requested SPS data transfer has
 1503 * completed.
1504 *
1505 */
1506
1507static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
1508{
1509
1510 struct msm_hs_port *msm_uport =
1511 (struct msm_hs_port *)
1512 ((struct sps_event_notify *)notify)->user;
Mayank Rana05396b22013-03-16 19:10:11 +05301513 struct uart_port *uport;
1514 unsigned long flags;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301515
Mayank Rana05396b22013-03-16 19:10:11 +05301516 uport = &(msm_uport->uport);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301517 msm_uport->notify = *notify;
1518 pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
1519 __func__, notify->event_id,
1520 notify->data.transfer.iovec.addr,
1521 notify->data.transfer.iovec.size,
1522 notify->data.transfer.iovec.flags);
1523
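	/*
	 * Record the size of the completed BAM transfer for the RX tasklet,
	 * which pushes the received bytes up to the tty layer.
	 */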
Mayank Rana05396b22013-03-16 19:10:11 +05301524 if (msm_uport->rx.flush == FLUSH_NONE) {
1525 spin_lock_irqsave(&uport->lock, flags);
1526 msm_uport->rx_count_callback = notify->data.transfer.iovec.size;
1527 spin_unlock_irqrestore(&uport->lock, flags);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301528 tasklet_schedule(&msm_uport->rx.tlet);
Mayank Rana05396b22013-03-16 19:10:11 +05301529 }
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301530}
1531
Mayank Rana55046232011-03-07 10:28:42 +05301532/*
 1533 * This routine is called when we are done with a DMA transfer or when
 1534 * a flush has been sent to the data mover driver.
1535 *
1536 * This routine is registered with Data mover when we set up a Data Mover
1537 * transfer. It is called from Data mover ISR when the DMA transfer is done.
1538 */
1539static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
1540 unsigned int result,
1541 struct msm_dmov_errdata *err)
1542{
Mayank Rana55046232011-03-07 10:28:42 +05301543 struct msm_hs_port *msm_uport;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301544 struct uart_port *uport;
1545 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301546
1547 msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer);
Mayank Rana9c8bda92013-02-28 11:58:04 +05301548 uport = &(msm_uport->uport);
1549
1550 pr_debug("%s(): called result:%x\n", __func__, result);
1551 if (!(result & DMOV_RSLT_ERROR)) {
1552 if (result & DMOV_RSLT_FLUSH) {
1553 if (msm_uport->rx_discard_flush_issued) {
1554 spin_lock_irqsave(&uport->lock, flags);
1555 msm_uport->rx_discard_flush_issued = false;
1556 spin_unlock_irqrestore(&uport->lock, flags);
1557 wake_up(&msm_uport->rx.wait);
1558 }
1559 }
1560 }
Mayank Rana55046232011-03-07 10:28:42 +05301561
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001562 tasklet_schedule(&msm_uport->rx.tlet);
Mayank Rana55046232011-03-07 10:28:42 +05301563}
1564
1565/*
1566 * Standard API, Current states of modem control inputs
1567 *
1568 * Since CTS can be handled entirely by HARDWARE we always
1569 * indicate clear to send and count on the TX FIFO to block when
1570 * it fills up.
1571 *
1572 * - TIOCM_DCD
1573 * - TIOCM_CTS
1574 * - TIOCM_DSR
1575 * - TIOCM_RI
 1576 * DCD and DSR are unsupported and are reported high; RI is reported low.
1577 */
1578static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
1579{
1580 return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
1581}
1582
1583/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001584 * Standard API, Set or clear RFR_signal
1585 *
 1586 * To set RFR high (indicating we are not ready for data), disable auto
 1587 * ready-for-receiving and then drive RFR_N high. To set RFR low, simply
 1588 * re-enable auto ready-for-receiving and the hardware will lower the RFR
 1589 * signal when it is ready.
Mayank Rana55046232011-03-07 10:28:42 +05301590 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001591void msm_hs_set_mctrl_locked(struct uart_port *uport,
Mayank Rana55046232011-03-07 10:28:42 +05301592 unsigned int mctrl)
1593{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001594 unsigned int set_rts;
1595 unsigned int data;
Mayank Rana55046232011-03-07 10:28:42 +05301596
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001597 /* RTS is active low */
1598 set_rts = TIOCM_RTS & mctrl ? 0 : 1;
Mayank Rana55046232011-03-07 10:28:42 +05301599
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001600 data = msm_hs_read(uport, UARTDM_MR1_ADDR);
1601 if (set_rts) {
1602 /*disable auto ready-for-receiving */
1603 data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
1604 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
1605 /* set RFR_N to high */
1606 msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
1607 } else {
1608 /* Enable auto ready-for-receiving */
1609 data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
1610 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
1611 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001612 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301613}
1614
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001615void msm_hs_set_mctrl(struct uart_port *uport,
1616 unsigned int mctrl)
1617{
1618 unsigned long flags;
1619
1620 spin_lock_irqsave(&uport->lock, flags);
1621 msm_hs_set_mctrl_locked(uport, mctrl);
1622 spin_unlock_irqrestore(&uport->lock, flags);
1623}
1624EXPORT_SYMBOL(msm_hs_set_mctrl);
1625
Mayank Rana55046232011-03-07 10:28:42 +05301626/* Standard API, Enable modem status (CTS) interrupt */
1627static void msm_hs_enable_ms_locked(struct uart_port *uport)
1628{
1629 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1630
Mayank Rana55046232011-03-07 10:28:42 +05301631 /* Enable DELTA_CTS Interrupt */
1632 msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
1633 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001634 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301635
1636}
1637
Saket Saurabhce394102012-10-29 19:51:28 +05301638static void msm_hs_flush_buffer(struct uart_port *uport)
1639{
1640 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1641
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301642 if (msm_uport->tx.dma_in_flight)
1643 msm_uport->tty_flush_receive = true;
Saket Saurabhce394102012-10-29 19:51:28 +05301644}
1645
Mayank Rana55046232011-03-07 10:28:42 +05301646/*
1647 * Standard API, Break Signal
1648 *
 1649 * Control the transmission of a break signal: ctl == 0 terminates the
 1650 * break signal, ctl != 0 starts it.
1651 */
1652static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
1653{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001654 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301655
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001656 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301657 msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001658 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001659 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301660}
1661
1662static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
1663{
1664 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001665 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301666
Mayank Rana55046232011-03-07 10:28:42 +05301667 if (cfg_flags & UART_CONFIG_TYPE) {
1668 uport->type = PORT_MSM;
1669 msm_hs_request_port(uport);
1670 }
Mayank Ranabbfd2692011-09-20 08:51:17 +05301671
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001672 if (is_gsbi_uart(msm_uport)) {
Mayank Rana00b6bff2011-08-17 08:33:42 +05301673 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301674 clk_prepare_enable(msm_uport->pclk);
1675 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001676 iowrite32(GSBI_PROTOCOL_UART, msm_uport->mapped_gsbi +
1677 GSBI_CONTROL_ADDR);
Mayank Ranacb589d82012-03-01 11:50:03 +05301678 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana00b6bff2011-08-17 08:33:42 +05301679 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301680 clk_disable_unprepare(msm_uport->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001681 }
Mayank Rana55046232011-03-07 10:28:42 +05301682}
1683
1684/* Handle CTS changes (Called from interrupt handler) */
Mayank Ranaee815f32011-12-08 09:06:09 +05301685static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301686{
Mayank Rana55046232011-03-07 10:28:42 +05301687 /* clear interrupt */
1688 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689	/* Calling CLOCK API. Hence mb() is required here. */
1690 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301691 uport->icount.cts++;
1692
Mayank Rana55046232011-03-07 10:28:42 +05301693 /* clear the IOCTL TIOCMIWAIT if called */
1694 wake_up_interruptible(&uport->state->port.delta_msr_wait);
1695}
1696
1697/* check if the TX path is flushed, and if so clock off
1698 * returns 0 did not clock off, need to retry (still sending final byte)
1699 * -1 did not clock off, do not retry
1700 * 1 if we clocked off
1701 */
Mayank Ranacb589d82012-03-01 11:50:03 +05301702static int msm_hs_check_clock_off(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301703{
1704 unsigned long sr_status;
Mayank Ranacb589d82012-03-01 11:50:03 +05301705 unsigned long flags;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301706 int ret;
Mayank Rana55046232011-03-07 10:28:42 +05301707 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1708 struct circ_buf *tx_buf = &uport->state->xmit;
1709
Mayank Ranacb589d82012-03-01 11:50:03 +05301710 mutex_lock(&msm_uport->clk_mutex);
1711 spin_lock_irqsave(&uport->lock, flags);
1712
Mayank Rana55046232011-03-07 10:28:42 +05301713 /* Cancel if tx tty buffer is not empty, dma is in flight,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001714 * or tx fifo is not empty */
Mayank Rana55046232011-03-07 10:28:42 +05301715 if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
1716 !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001717 msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) {
Mayank Ranacb589d82012-03-01 11:50:03 +05301718 spin_unlock_irqrestore(&uport->lock, flags);
1719 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301720 return -1;
1721 }
1722
1723 /* Make sure the uart is finished with the last byte */
1724 sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
Mayank Ranacb589d82012-03-01 11:50:03 +05301725 if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) {
1726 spin_unlock_irqrestore(&uport->lock, flags);
1727 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301728 return 0; /* retry */
Mayank Ranacb589d82012-03-01 11:50:03 +05301729 }
Mayank Rana55046232011-03-07 10:28:42 +05301730
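	/*
	 * clk_req_off_state tracks the forced RXSTALE flush:
	 * CLK_REQ_OFF_START -> RXSTALE_ISSUED (stale event forced below) ->
	 * FLUSH_ISSUED (stale interrupt handled in msm_hs_isr) ->
	 * RXSTALE_FLUSHED (RX tasklet has drained the data).  BLSP/BAM ports
	 * skip the interrupt step and are advanced to RXSTALE_FLUSHED directly
	 * in the RXSTALE_ISSUED case below.  Clock off proceeds only once the
	 * state reaches RXSTALE_FLUSHED.
	 */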
1731 /* Make sure forced RXSTALE flush complete */
1732 switch (msm_uport->clk_req_off_state) {
1733 case CLK_REQ_OFF_START:
1734 msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
Mayank Rana05396b22013-03-16 19:10:11 +05301735
1736 if (!is_blsp_uart(msm_uport)) {
1737 msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
1738 /*
 1739			 * Before returning make sure that the device writel has
 1740			 * completed. Hence mb() is required here.
1741 */
1742 mb();
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301743 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301744 spin_unlock_irqrestore(&uport->lock, flags);
1745 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301746 return 0; /* RXSTALE flush not complete - retry */
1747 case CLK_REQ_OFF_RXSTALE_ISSUED:
1748 case CLK_REQ_OFF_FLUSH_ISSUED:
Mayank Ranacb589d82012-03-01 11:50:03 +05301749 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana05396b22013-03-16 19:10:11 +05301750 if (is_blsp_uart(msm_uport)) {
1751 msm_uport->clk_req_off_state =
1752 CLK_REQ_OFF_RXSTALE_FLUSHED;
Mayank Rana05396b22013-03-16 19:10:11 +05301753 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301754 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301755 return 0; /* RXSTALE flush not complete - retry */
1756 case CLK_REQ_OFF_RXSTALE_FLUSHED:
1757 break; /* continue */
1758 }
1759
1760 if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
Mayank Rana9c8bda92013-02-28 11:58:04 +05301761 if (msm_uport->rx.flush == FLUSH_NONE) {
Mayank Rana55046232011-03-07 10:28:42 +05301762 msm_hs_stop_rx_locked(uport);
Saket Saurabh467614f2013-03-16 17:24:12 +05301763 if (!is_blsp_uart(msm_uport))
1764 msm_uport->rx_discard_flush_issued = true;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301765 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301766
1767 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana9c8bda92013-02-28 11:58:04 +05301768 if (msm_uport->rx_discard_flush_issued) {
 1769			pr_debug("%s(): waiting for flush completion.\n",
1770 __func__);
1771 ret = wait_event_timeout(msm_uport->rx.wait,
1772 msm_uport->rx_discard_flush_issued == false,
1773 RX_FLUSH_COMPLETE_TIMEOUT);
1774 if (!ret)
1775 pr_err("%s(): Flush complete pending.\n",
1776 __func__);
1777 }
1778
Mayank Ranacb589d82012-03-01 11:50:03 +05301779 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301780 return 0; /* come back later to really clock off */
1781 }
1782
Mayank Rana55046232011-03-07 10:28:42 +05301783 spin_unlock_irqrestore(&uport->lock, flags);
1784
Mayank Rana55046232011-03-07 10:28:42 +05301785 /* we really want to clock off */
Mayank Ranacb589d82012-03-01 11:50:03 +05301786 clk_disable_unprepare(msm_uport->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001787 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301788 clk_disable_unprepare(msm_uport->pclk);
1789
Mayank Rana55046232011-03-07 10:28:42 +05301790 msm_uport->clk_state = MSM_HS_CLK_OFF;
Mayank Ranacb589d82012-03-01 11:50:03 +05301791
1792 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001793 if (use_low_power_wakeup(msm_uport)) {
1794 msm_uport->wakeup.ignore = 1;
1795 enable_irq(msm_uport->wakeup.irq);
Mayank Rana55046232011-03-07 10:28:42 +05301796 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001797 wake_unlock(&msm_uport->dma_wake_lock);
Mayank Ranacb589d82012-03-01 11:50:03 +05301798
1799 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana88d49142013-01-16 17:28:53 +05301800
1801 /* Reset PNOC Bus Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301802 msm_hs_bus_voting(msm_uport, BUS_RESET);
Mayank Ranacb589d82012-03-01 11:50:03 +05301803 mutex_unlock(&msm_uport->clk_mutex);
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301804
Mayank Rana55046232011-03-07 10:28:42 +05301805 return 1;
1806}
1807
Mayank Ranacb589d82012-03-01 11:50:03 +05301808static void hsuart_clock_off_work(struct work_struct *w)
1809{
1810 struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
1811 clock_off_w);
Mayank Rana55046232011-03-07 10:28:42 +05301812 struct uart_port *uport = &msm_uport->uport;
1813
Mayank Ranacb589d82012-03-01 11:50:03 +05301814 if (!msm_hs_check_clock_off(uport)) {
1815 hrtimer_start(&msm_uport->clk_off_timer,
1816 msm_uport->clk_off_delay,
1817 HRTIMER_MODE_REL);
Mayank Rana55046232011-03-07 10:28:42 +05301818 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301819}
Mayank Rana55046232011-03-07 10:28:42 +05301820
Mayank Ranacb589d82012-03-01 11:50:03 +05301821static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
1822{
1823 struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
1824 clk_off_timer);
Mayank Rana55046232011-03-07 10:28:42 +05301825
Mayank Ranacb589d82012-03-01 11:50:03 +05301826 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
1827 return HRTIMER_NORESTART;
Mayank Rana55046232011-03-07 10:28:42 +05301828}
1829
1830static irqreturn_t msm_hs_isr(int irq, void *dev)
1831{
1832 unsigned long flags;
1833 unsigned long isr_status;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001834 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
Mayank Rana55046232011-03-07 10:28:42 +05301835 struct uart_port *uport = &msm_uport->uport;
1836 struct circ_buf *tx_buf = &uport->state->xmit;
1837 struct msm_hs_tx *tx = &msm_uport->tx;
1838 struct msm_hs_rx *rx = &msm_uport->rx;
1839
1840 spin_lock_irqsave(&uport->lock, flags);
1841
1842 isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);
1843
1844 /* Uart RX starting */
1845 if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846 wake_lock(&rx->wake_lock); /* hold wakelock while rx dma */
Mayank Rana55046232011-03-07 10:28:42 +05301847 msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
1848 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001849		/* Complete device write for IMR. Hence mb() is required. */
1850 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301851 }
1852 /* Stale rx interrupt */
1853 if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
1854 msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
1855 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001856 /*
1857 * Complete device write before calling DMOV API. Hence
 1858		 * mb() is required here.
1859 */
1860 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301861
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301862 if (msm_uport->clk_req_off_state ==
Mayank Rana05396b22013-03-16 19:10:11 +05301863 CLK_REQ_OFF_RXSTALE_ISSUED)
Mayank Rana55046232011-03-07 10:28:42 +05301864 msm_uport->clk_req_off_state =
Mayank Rana05396b22013-03-16 19:10:11 +05301865 CLK_REQ_OFF_FLUSH_ISSUED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001866
Mayank Rana05396b22013-03-16 19:10:11 +05301867 if (!is_blsp_uart(msm_uport) && (rx->flush == FLUSH_NONE)) {
Mayank Rana55046232011-03-07 10:28:42 +05301868 rx->flush = FLUSH_DATA_READY;
Mayank Rana05396b22013-03-16 19:10:11 +05301869 msm_dmov_flush(msm_uport->dma_rx_channel, 1);
Mayank Rana55046232011-03-07 10:28:42 +05301870 }
1871 }
1872 /* tx ready interrupt */
1873 if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
1874 /* Clear TX Ready */
1875 msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);
1876
1877 if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
1878 msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
1879 msm_hs_write(uport, UARTDM_IMR_ADDR,
1880 msm_uport->imr_reg);
1881 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001882 /*
1883 * Complete both writes before starting new TX.
 1884		 * Hence mb() is required here.
1885 */
1886 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301887 /* Complete DMA TX transactions and submit new transactions */
Saket Saurabhce394102012-10-29 19:51:28 +05301888
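		/*
		 * Advance the circular tail past the completed transfer.
		 * Clearing the UART_XMIT_SIZE bit wraps the index, since
		 * tail + tx_count never exceeds 2 * UART_XMIT_SIZE - 1.
		 */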
1889 /* Do not update tx_buf.tail if uart_flush_buffer already
1890 called in serial core */
1891 if (!msm_uport->tty_flush_receive)
1892 tx_buf->tail = (tx_buf->tail +
1893 tx->tx_count) & ~UART_XMIT_SIZE;
1894 else
1895 msm_uport->tty_flush_receive = false;
Mayank Rana55046232011-03-07 10:28:42 +05301896
1897 tx->dma_in_flight = 0;
1898
1899 uport->icount.tx += tx->tx_count;
1900 if (tx->tx_ready_int_en)
1901 msm_hs_submit_tx_locked(uport);
1902
1903 if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
1904 uart_write_wakeup(uport);
1905 }
1906 if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
1907 /* TX FIFO is empty */
1908 msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
1909 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001910 /*
1911 * Complete device write before starting clock_off request.
 1912		 * Hence mb() is required here.
1913 */
1914 mb();
Mayank Ranacb589d82012-03-01 11:50:03 +05301915 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
Mayank Rana55046232011-03-07 10:28:42 +05301916 }
1917
1918 /* Change in CTS interrupt */
1919 if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
Mayank Ranaee815f32011-12-08 09:06:09 +05301920 msm_hs_handle_delta_cts_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301921
1922 spin_unlock_irqrestore(&uport->lock, flags);
1923
1924 return IRQ_HANDLED;
1925}
1926
Mayank Rana3715fe62013-03-25 18:33:58 +05301927/*
1928 * Find UART device port using its port index value.
1929 */
1930struct uart_port *msm_hs_get_uart_port(int port_index)
1931{
1932 int i;
1933
1934 for (i = 0; i < UARTDM_NR; i++) {
1935 if (q_uart_port[i].uport.line == port_index)
1936 return &q_uart_port[i].uport;
1937 }
1938
1939 return NULL;
1940}
1941EXPORT_SYMBOL(msm_hs_get_uart_port);
1942
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001943/* request to turn off uart clock once pending TX is flushed */
 1944void msm_hs_request_clock_off(struct uart_port *uport)
{
1945 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301946 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1947
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001948 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301949 if (msm_uport->clk_state == MSM_HS_CLK_ON) {
1950 msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
1951 msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
Mayank Rana55046232011-03-07 10:28:42 +05301952 msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
1953 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001954 /*
 1955		 * Complete device write before returning.
 1956		 * Hence mb() is required here.
1957 */
1958 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301959 }
Mayank Rana55046232011-03-07 10:28:42 +05301960 spin_unlock_irqrestore(&uport->lock, flags);
1961}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001962EXPORT_SYMBOL(msm_hs_request_clock_off);
Mayank Rana55046232011-03-07 10:28:42 +05301963
Mayank Ranacb589d82012-03-01 11:50:03 +05301964void msm_hs_request_clock_on(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301965{
1966 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Ranacb589d82012-03-01 11:50:03 +05301967 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301968 unsigned int data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001969 int ret = 0;
Mayank Rana55046232011-03-07 10:28:42 +05301970
Mayank Ranacb589d82012-03-01 11:50:03 +05301971 mutex_lock(&msm_uport->clk_mutex);
1972 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301973
1974 switch (msm_uport->clk_state) {
1975 case MSM_HS_CLK_OFF:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001976 wake_lock(&msm_uport->dma_wake_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001977 disable_irq_nosync(msm_uport->wakeup.irq);
Mayank Ranacb589d82012-03-01 11:50:03 +05301978 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana88d49142013-01-16 17:28:53 +05301979
1980 /* Vote for PNOC BUS Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301981 msm_hs_bus_voting(msm_uport, BUS_SCALING);
Mayank Rana88d49142013-01-16 17:28:53 +05301982
Mayank Ranacb589d82012-03-01 11:50:03 +05301983 ret = clk_prepare_enable(msm_uport->clk);
1984 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001985			dev_err(uport->dev, "Clock ON Failure"
Mayank Ranacb589d82012-03-01 11:50:03 +05301986				" For UART CLK Stalling HSUART\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001987 break;
1988 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301989
1990 if (msm_uport->pclk) {
1991 ret = clk_prepare_enable(msm_uport->pclk);
1992 if (unlikely(ret)) {
1993 clk_disable_unprepare(msm_uport->clk);
 1994				dev_err(uport->dev, "Clock ON Failure"
 1995					" For UART Pclk Stalling HSUART\n");
1996 break;
1997 }
1998 }
1999 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002000 /* else fall-through */
Mayank Rana55046232011-03-07 10:28:42 +05302001 case MSM_HS_CLK_REQUEST_OFF:
2002 if (msm_uport->rx.flush == FLUSH_STOP ||
2003 msm_uport->rx.flush == FLUSH_SHUTDOWN) {
2004 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
2005 data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302006 if (is_blsp_uart(msm_uport))
2007 data |= UARTDM_RX_BAM_ENABLE_BMSK;
2008 else
2009 data |= UARTDM_RX_DM_EN_BMSK;
Mayank Rana55046232011-03-07 10:28:42 +05302010 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002011 /* Complete above device write. Hence mb() here. */
2012 mb();
Mayank Rana55046232011-03-07 10:28:42 +05302013 }
2014 hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
Mayank Rana05396b22013-03-16 19:10:11 +05302015 if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
2016 if (is_blsp_uart(msm_uport)) {
2017 spin_unlock_irqrestore(&uport->lock, flags);
2018 msm_hs_spsconnect_rx(uport);
2019 spin_lock_irqsave(&uport->lock, flags);
2020 }
Mayank Rana55046232011-03-07 10:28:42 +05302021 msm_hs_start_rx_locked(uport);
Mayank Rana05396b22013-03-16 19:10:11 +05302022 }
Mayank Rana55046232011-03-07 10:28:42 +05302023 if (msm_uport->rx.flush == FLUSH_STOP)
2024 msm_uport->rx.flush = FLUSH_IGNORE;
2025 msm_uport->clk_state = MSM_HS_CLK_ON;
2026 break;
2027 case MSM_HS_CLK_ON:
2028 break;
2029 case MSM_HS_CLK_PORT_OFF:
2030 break;
2031 }
Mayank Rana55046232011-03-07 10:28:42 +05302032
Mayank Rana55046232011-03-07 10:28:42 +05302033 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Ranacb589d82012-03-01 11:50:03 +05302034 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05302035}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002036EXPORT_SYMBOL(msm_hs_request_clock_on);
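/*
 * Illustrative usage (not part of this driver): a client of the exported
 * clock-vote API, such as a Bluetooth driver sitting on top of this port,
 * would typically bracket its activity with these calls, e.g.
 *
 *	struct uart_port *uport = msm_hs_get_uart_port(0);
 *
 *	if (uport) {
 *		msm_hs_request_clock_on(uport);
 *		// ... transfer data through the tty/serial core ...
 *		msm_hs_request_clock_off(uport);
 *	}
 *
 * The port index (0) and the call placement are hypothetical; clock off only
 * takes effect once the pending TX has been flushed, as described above.
 */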
Mayank Rana55046232011-03-07 10:28:42 +05302037
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002038static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
Mayank Rana55046232011-03-07 10:28:42 +05302039{
2040 unsigned int wakeup = 0;
2041 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002042 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
Mayank Rana55046232011-03-07 10:28:42 +05302043 struct uart_port *uport = &msm_uport->uport;
2044 struct tty_struct *tty = NULL;
2045
2046 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002047 if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
2048 /* ignore the first irq - it is a pending irq that occured
Mayank Rana55046232011-03-07 10:28:42 +05302049 * before enable_irq() */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002050 if (msm_uport->wakeup.ignore)
2051 msm_uport->wakeup.ignore = 0;
Mayank Rana55046232011-03-07 10:28:42 +05302052 else
2053 wakeup = 1;
2054 }
2055
2056 if (wakeup) {
2057 /* the uart was clocked off during an rx, wake up and
2058 * optionally inject char into tty rx */
Mayank Ranacb589d82012-03-01 11:50:03 +05302059 spin_unlock_irqrestore(&uport->lock, flags);
2060 msm_hs_request_clock_on(uport);
2061 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002062 if (msm_uport->wakeup.inject_rx) {
Mayank Rana55046232011-03-07 10:28:42 +05302063 tty = uport->state->port.tty;
2064 tty_insert_flip_char(tty,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002065 msm_uport->wakeup.rx_to_inject,
Mayank Rana55046232011-03-07 10:28:42 +05302066 TTY_NORMAL);
Mayank Rana55046232011-03-07 10:28:42 +05302067 }
2068 }
2069
2070 spin_unlock_irqrestore(&uport->lock, flags);
2071
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002072 if (wakeup && msm_uport->wakeup.inject_rx)
2073 tty_flip_buffer_push(tty);
Mayank Rana55046232011-03-07 10:28:42 +05302074 return IRQ_HANDLED;
2075}
2076
2077static const char *msm_hs_type(struct uart_port *port)
2078{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002079 return ("MSM HS UART");
Mayank Rana55046232011-03-07 10:28:42 +05302080}
2081
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302082/**
2083 * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
2084 * @uport: uart port
2085 */
2086static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
2087{
2088 struct platform_device *pdev = to_platform_device(uport->dev);
2089 const struct msm_serial_hs_platform_data *pdata =
2090 pdev->dev.platform_data;
2091
2092 if (pdata) {
2093 if (gpio_is_valid(pdata->uart_tx_gpio))
2094 gpio_free(pdata->uart_tx_gpio);
2095 if (gpio_is_valid(pdata->uart_rx_gpio))
2096 gpio_free(pdata->uart_rx_gpio);
2097 if (gpio_is_valid(pdata->uart_cts_gpio))
2098 gpio_free(pdata->uart_cts_gpio);
2099 if (gpio_is_valid(pdata->uart_rfr_gpio))
2100 gpio_free(pdata->uart_rfr_gpio);
2101 } else {
2102 pr_err("Error:Pdata is NULL.\n");
2103 }
2104}
2105
2106/**
2107 * msm_hs_config_uart_gpios - Configures UART GPIOs
2108 * @uport: uart port
2109 */
2110static int msm_hs_config_uart_gpios(struct uart_port *uport)
2111{
2112 struct platform_device *pdev = to_platform_device(uport->dev);
2113 const struct msm_serial_hs_platform_data *pdata =
2114 pdev->dev.platform_data;
2115 int ret = 0;
2116
2117 if (pdata) {
2118 if (gpio_is_valid(pdata->uart_tx_gpio)) {
2119 ret = gpio_request(pdata->uart_tx_gpio,
2120 "UART_TX_GPIO");
2121 if (unlikely(ret)) {
2122 pr_err("gpio request failed for:%d\n",
2123 pdata->uart_tx_gpio);
2124 goto exit_uart_config;
2125 }
2126 }
2127
2128 if (gpio_is_valid(pdata->uart_rx_gpio)) {
2129 ret = gpio_request(pdata->uart_rx_gpio,
2130 "UART_RX_GPIO");
2131 if (unlikely(ret)) {
2132 pr_err("gpio request failed for:%d\n",
2133 pdata->uart_rx_gpio);
2134 goto uart_tx_unconfig;
2135 }
2136 }
2137
2138 if (gpio_is_valid(pdata->uart_cts_gpio)) {
2139 ret = gpio_request(pdata->uart_cts_gpio,
2140 "UART_CTS_GPIO");
2141 if (unlikely(ret)) {
2142 pr_err("gpio request failed for:%d\n",
2143 pdata->uart_cts_gpio);
2144 goto uart_rx_unconfig;
2145 }
2146 }
2147
2148 if (gpio_is_valid(pdata->uart_rfr_gpio)) {
2149 ret = gpio_request(pdata->uart_rfr_gpio,
2150 "UART_RFR_GPIO");
2151 if (unlikely(ret)) {
2152 pr_err("gpio request failed for:%d\n",
2153 pdata->uart_rfr_gpio);
2154 goto uart_cts_unconfig;
2155 }
2156 }
2157 } else {
2158 pr_err("Pdata is NULL.\n");
2159 ret = -EINVAL;
2160 }
2161 return ret;
2162
2163uart_cts_unconfig:
2164 if (gpio_is_valid(pdata->uart_cts_gpio))
2165 gpio_free(pdata->uart_cts_gpio);
2166uart_rx_unconfig:
2167 if (gpio_is_valid(pdata->uart_rx_gpio))
2168 gpio_free(pdata->uart_rx_gpio);
2169uart_tx_unconfig:
2170 if (gpio_is_valid(pdata->uart_tx_gpio))
2171 gpio_free(pdata->uart_tx_gpio);
2172exit_uart_config:
2173 return ret;
2174}
2175
Mayank Rana55046232011-03-07 10:28:42 +05302176/* Called when port is opened */
2177static int msm_hs_startup(struct uart_port *uport)
2178{
2179 int ret;
2180 int rfr_level;
2181 unsigned long flags;
2182 unsigned int data;
2183 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana40836782012-11-16 14:45:47 +05302184 struct platform_device *pdev = to_platform_device(uport->dev);
2185 const struct msm_serial_hs_platform_data *pdata =
2186 pdev->dev.platform_data;
Mayank Rana55046232011-03-07 10:28:42 +05302187 struct circ_buf *tx_buf = &uport->state->xmit;
2188 struct msm_hs_tx *tx = &msm_uport->tx;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302189 struct msm_hs_rx *rx = &msm_uport->rx;
2190 struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
2191 struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05302192
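	/*
	 * Leave 16 words of headroom below the FIFO size for the auto-RFR
	 * flow-control level programmed into MR1 below.
	 */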
2193 rfr_level = uport->fifosize;
2194 if (rfr_level > 16)
2195 rfr_level -= 16;
2196
2197 tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
2198 DMA_TO_DEVICE);
2199
Mayank Rana679436e2012-03-31 05:41:14 +05302200 wake_lock(&msm_uport->dma_wake_lock);
Mayank Rana55046232011-03-07 10:28:42 +05302201 /* turn on uart clk */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002202 ret = msm_hs_init_clk(uport);
Mayank Rana55046232011-03-07 10:28:42 +05302203 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302204 pr_err("Turning ON uartclk error\n");
2205 wake_unlock(&msm_uport->dma_wake_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002206 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05302207 }
2208
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302209 if (is_blsp_uart(msm_uport)) {
2210 ret = msm_hs_config_uart_gpios(uport);
2211 if (ret) {
2212 pr_err("Uart GPIO request failed\n");
2213 goto deinit_uart_clk;
2214 }
2215 } else {
2216 if (pdata && pdata->gpio_config)
2217 if (unlikely(pdata->gpio_config(1)))
2218 dev_err(uport->dev, "Cannot configure gpios\n");
2219 }
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302220
2221 /* SPS Connect for BAM endpoints */
2222 if (is_blsp_uart(msm_uport)) {
2223 /* SPS connect for TX */
2224 ret = msm_hs_spsconnect_tx(uport);
2225 if (ret) {
2226 pr_err("msm_serial_hs: SPS connect failed for TX");
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302227 goto unconfig_uart_gpios;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302228 }
2229
2230 /* SPS connect for RX */
2231 ret = msm_hs_spsconnect_rx(uport);
2232 if (ret) {
2233 pr_err("msm_serial_hs: SPS connect failed for RX");
2234 goto sps_disconnect_tx;
2235 }
2236 }
2237
Mayank Rana55046232011-03-07 10:28:42 +05302238 /* Set auto RFR Level */
2239 data = msm_hs_read(uport, UARTDM_MR1_ADDR);
2240 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
2241 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
2242 data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
2243 data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
2244 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
2245
2246 /* Make sure RXSTALE count is non-zero */
2247 data = msm_hs_read(uport, UARTDM_IPR_ADDR);
2248 if (!data) {
2249 data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
2250 msm_hs_write(uport, UARTDM_IPR_ADDR, data);
2251 }
2252
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302253 if (is_blsp_uart(msm_uport)) {
2254 /* Enable BAM mode */
2255 data = UARTDM_TX_BAM_ENABLE_BMSK | UARTDM_RX_BAM_ENABLE_BMSK;
2256 } else {
2257 /* Enable Data Mover Mode */
2258 data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
2259 }
Mayank Rana55046232011-03-07 10:28:42 +05302260 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
2261
2262 /* Reset TX */
2263 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
2264 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
2265 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
2266 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
2267 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
2268 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
2269 msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
2270 /* Turn on Uart Receiver */
2271 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);
2272
2273 /* Turn on Uart Transmitter */
2274 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);
2275
2276 /* Initialize the tx */
2277 tx->tx_ready_int_en = 0;
2278 tx->dma_in_flight = 0;
2279
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302280 if (!is_blsp_uart(msm_uport)) {
2281 tx->xfer.complete_func = msm_hs_dmov_tx_callback;
Mayank Rana55046232011-03-07 10:28:42 +05302282
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302283 tx->command_ptr->cmd = CMD_LC |
2284 CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;
Mayank Rana55046232011-03-07 10:28:42 +05302285
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302286 tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
Mayank Rana55046232011-03-07 10:28:42 +05302287 | (MSM_UARTDM_BURST_SIZE);
2288
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302289 tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16);
Mayank Rana55046232011-03-07 10:28:42 +05302290
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302291 tx->command_ptr->dst_row_addr =
2292 msm_uport->uport.mapbase + UARTDM_TF_ADDR;
Mayank Rana05396b22013-03-16 19:10:11 +05302293
2294 msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302295 }
Mayank Rana55046232011-03-07 10:28:42 +05302296
Mayank Rana55046232011-03-07 10:28:42 +05302297 /* Enable reading the current CTS, no harm even if CTS is ignored */
2298 msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
2299
2300 msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002301 /*
2302 * Complete all device write related configuration before
 2303	 * queuing the RX request. Hence mb() is required here.
2304 */
2305 mb();
Mayank Rana55046232011-03-07 10:28:42 +05302306
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002307 if (use_low_power_wakeup(msm_uport)) {
2308 ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
Mayank Rana679436e2012-03-31 05:41:14 +05302309 if (unlikely(ret)) {
2310 pr_err("%s():Err setting wakeup irq\n", __func__);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302311 goto sps_disconnect_rx;
Mayank Rana679436e2012-03-31 05:41:14 +05302312 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002313 }
Mayank Rana55046232011-03-07 10:28:42 +05302314
2315 ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
2316 "msm_hs_uart", msm_uport);
2317 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302318 pr_err("%s():Error getting uart irq\n", __func__);
2319 goto free_wake_irq;
Mayank Rana55046232011-03-07 10:28:42 +05302320 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002321 if (use_low_power_wakeup(msm_uport)) {
Mayank Ranacb589d82012-03-01 11:50:03 +05302322
2323 ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
2324 msm_hs_wakeup_isr,
2325 IRQF_TRIGGER_FALLING,
2326 "msm_hs_wakeup", msm_uport);
2327
Mayank Rana55046232011-03-07 10:28:42 +05302328 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302329 pr_err("%s():Err getting uart wakeup_irq\n", __func__);
2330 goto free_uart_irq;
Mayank Rana55046232011-03-07 10:28:42 +05302331 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002332 disable_irq(msm_uport->wakeup.irq);
Mayank Rana55046232011-03-07 10:28:42 +05302333 }
2334
Mayank Rana88d49142013-01-16 17:28:53 +05302335 /* Vote for PNOC BUS Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302336 msm_hs_bus_voting(msm_uport, BUS_SCALING);
Mayank Rana88d49142013-01-16 17:28:53 +05302337
Mayank Rana55046232011-03-07 10:28:42 +05302338 spin_lock_irqsave(&uport->lock, flags);
2339
Mayank Rana55046232011-03-07 10:28:42 +05302340 msm_hs_start_rx_locked(uport);
2341
2342 spin_unlock_irqrestore(&uport->lock, flags);
2343 ret = pm_runtime_set_active(uport->dev);
2344 if (ret)
2345 dev_err(uport->dev, "set active error:%d\n", ret);
2346 pm_runtime_enable(uport->dev);
2347
2348 return 0;
2349
Mayank Rana679436e2012-03-31 05:41:14 +05302350free_uart_irq:
2351 free_irq(uport->irq, msm_uport);
2352free_wake_irq:
2353 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302354sps_disconnect_rx:
2355 if (is_blsp_uart(msm_uport))
2356 sps_disconnect(sps_pipe_handle_rx);
2357sps_disconnect_tx:
2358 if (is_blsp_uart(msm_uport))
2359 sps_disconnect(sps_pipe_handle_tx);
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302360unconfig_uart_gpios:
2361 if (is_blsp_uart(msm_uport))
2362 msm_hs_unconfig_uart_gpios(uport);
Mayank Rana679436e2012-03-31 05:41:14 +05302363deinit_uart_clk:
Mayank Ranacb589d82012-03-01 11:50:03 +05302364 clk_disable_unprepare(msm_uport->clk);
Mayank Rana679436e2012-03-31 05:41:14 +05302365 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05302366 clk_disable_unprepare(msm_uport->pclk);
Mayank Rana679436e2012-03-31 05:41:14 +05302367 wake_unlock(&msm_uport->dma_wake_lock);
2368
Mayank Rana55046232011-03-07 10:28:42 +05302369 return ret;
2370}
2371
2372/* Initialize tx and rx data structures */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002373static int uartdm_init_port(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05302374{
2375 int ret = 0;
2376 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2377 struct msm_hs_tx *tx = &msm_uport->tx;
2378 struct msm_hs_rx *rx = &msm_uport->rx;
2379
Mayank Rana55046232011-03-07 10:28:42 +05302380 init_waitqueue_head(&rx->wait);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302381 init_waitqueue_head(&tx->wait);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002382 wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx");
2383 wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND,
2384 "msm_serial_hs_dma");
2385
2386 tasklet_init(&rx->tlet, msm_serial_hs_rx_tlet,
2387 (unsigned long) &rx->tlet);
2388 tasklet_init(&tx->tlet, msm_serial_hs_tx_tlet,
2389 (unsigned long) &tx->tlet);
Mayank Rana55046232011-03-07 10:28:42 +05302390
2391 rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
2392 UARTDM_RX_BUF_SIZE, 16, 0);
2393 if (!rx->pool) {
2394 pr_err("%s(): cannot allocate rx_buffer_pool", __func__);
2395 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002396 goto exit_tasket_init;
Mayank Rana55046232011-03-07 10:28:42 +05302397 }
2398
2399 rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
2400 if (!rx->buffer) {
2401 pr_err("%s(): cannot allocate rx->buffer", __func__);
2402 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002403 goto free_pool;
Mayank Rana55046232011-03-07 10:28:42 +05302404 }
2405
Mayank Ranaff398d02012-12-18 10:22:50 +05302406 /* Set up Uart Receive */
Mayank Rana05396b22013-03-16 19:10:11 +05302407 if (is_blsp_uart(msm_uport))
2408 msm_hs_write(uport, UARTDM_RFWR_ADDR, 32);
2409 else
2410 msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
Mayank Ranaff398d02012-12-18 10:22:50 +05302411
2412 INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
2413
2414 if (is_blsp_uart(msm_uport))
2415 return ret;
2416
2417 /* Allocate the command pointer. Needs to be 64 bit aligned */
2418 tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
2419 if (!tx->command_ptr) {
 2420		ret = -ENOMEM;
 2421		goto free_rx_buffer;
2422 }
2423
2424 tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
2425 if (!tx->command_ptr_ptr) {
2426 ret = -ENOMEM;
2427 goto free_tx_command_ptr;
2428 }
2429
2430 tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
2431 sizeof(dmov_box), DMA_TO_DEVICE);
2432 tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
2433 tx->command_ptr_ptr,
2434 sizeof(u32), DMA_TO_DEVICE);
2435 tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
2436
Mayank Rana55046232011-03-07 10:28:42 +05302437 /* Allocate the command pointer. Needs to be 64 bit aligned */
2438 rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
2439 if (!rx->command_ptr) {
2440 pr_err("%s(): cannot allocate rx->command_ptr", __func__);
2441 ret = -ENOMEM;
Mayank Ranaff398d02012-12-18 10:22:50 +05302442 goto free_tx_command_ptr_ptr;
Mayank Rana55046232011-03-07 10:28:42 +05302443 }
2444
Mayank Rana8431de82011-12-08 09:06:08 +05302445 rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
Mayank Rana55046232011-03-07 10:28:42 +05302446 if (!rx->command_ptr_ptr) {
2447 pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
2448 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002449 goto free_rx_command_ptr;
Mayank Rana55046232011-03-07 10:28:42 +05302450 }
2451
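	/* Box mode: num_rows packs the row count (UARTDM_RX_BUF_SIZE / 16,
	 * i.e. 16-byte rows) into both the upper and lower half-words. */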
2452 rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
2453 (UARTDM_RX_BUF_SIZE >> 4);
2454
2455 rx->command_ptr->dst_row_addr = rx->rbuffer;
2456
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002457 rx->xfer.complete_func = msm_hs_dmov_rx_callback;
2458
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002459 rx->command_ptr->cmd = CMD_LC |
2460 CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;
2461
2462 rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
2463 | (MSM_UARTDM_BURST_SIZE);
2464 rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE;
2465 rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;
2466
Mayank Rana55046232011-03-07 10:28:42 +05302467 rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
2468 sizeof(dmov_box), DMA_TO_DEVICE);
2469
2470 *rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
2471
2472 rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
Mayank Rana8431de82011-12-08 09:06:08 +05302473 sizeof(u32), DMA_TO_DEVICE);
Mayank Rana55046232011-03-07 10:28:42 +05302474 rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
2475
Mayank Rana55046232011-03-07 10:28:42 +05302476 return ret;
2477
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002478free_rx_command_ptr:
Mayank Rana55046232011-03-07 10:28:42 +05302479 kfree(rx->command_ptr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002480
Mayank Ranaff398d02012-12-18 10:22:50 +05302481free_tx_command_ptr_ptr:
2482 kfree(msm_uport->tx.command_ptr_ptr);
2483 dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
2484 sizeof(u32), DMA_TO_DEVICE);
2485 dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
2486 sizeof(dmov_box), DMA_TO_DEVICE);
2487
2488free_tx_command_ptr:
2489 kfree(msm_uport->tx.command_ptr);
2490
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002491free_rx_buffer:
Mayank Rana55046232011-03-07 10:28:42 +05302492 dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002493 msm_uport->rx.rbuffer);
2494
2495free_pool:
Mayank Rana55046232011-03-07 10:28:42 +05302496 dma_pool_destroy(msm_uport->rx.pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002497
2498exit_tasket_init:
2499 wake_lock_destroy(&msm_uport->rx.wake_lock);
2500 wake_lock_destroy(&msm_uport->dma_wake_lock);
2501 tasklet_kill(&msm_uport->tx.tlet);
2502 tasklet_kill(&msm_uport->rx.tlet);
Mayank Rana55046232011-03-07 10:28:42 +05302503 return ret;
2504}
2505
Mayank Ranaff398d02012-12-18 10:22:50 +05302506struct msm_serial_hs_platform_data
2507 *msm_hs_dt_to_pdata(struct platform_device *pdev)
2508{
2509 struct device_node *node = pdev->dev.of_node;
2510 struct msm_serial_hs_platform_data *pdata;
2511 int rx_to_inject, ret;
2512
2513 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2514 if (!pdata) {
2515 pr_err("unable to allocate memory for platform data\n");
2516 return ERR_PTR(-ENOMEM);
2517 }
2518
2519 /* UART TX GPIO */
2520 pdata->uart_tx_gpio = of_get_named_gpio(node,
2521 "qcom,tx-gpio", 0);
2522 if (pdata->uart_tx_gpio < 0)
2523 pr_debug("uart_tx_gpio is not available\n");
2524
2525 /* UART RX GPIO */
2526 pdata->uart_rx_gpio = of_get_named_gpio(node,
2527 "qcom,rx-gpio", 0);
2528 if (pdata->uart_rx_gpio < 0)
2529 pr_debug("uart_rx_gpio is not available\n");
2530
2531 /* UART CTS GPIO */
2532 pdata->uart_cts_gpio = of_get_named_gpio(node,
2533 "qcom,cts-gpio", 0);
2534 if (pdata->uart_cts_gpio < 0)
2535 pr_debug("uart_cts_gpio is not available\n");
2536
2537 /* UART RFR GPIO */
2538 pdata->uart_rfr_gpio = of_get_named_gpio(node,
2539 "qcom,rfr-gpio", 0);
2540 if (pdata->uart_rfr_gpio < 0)
2541 pr_debug("uart_rfr_gpio is not available\n");
2542
2543 pdata->inject_rx_on_wakeup = of_property_read_bool(node,
2544 "qcom,inject-rx-on-wakeup");
2545
2546 if (pdata->inject_rx_on_wakeup) {
2547 ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
2548 &rx_to_inject);
2549 if (ret < 0) {
2550 pr_err("Error: Rx_char_to_inject not specified.\n");
2551 return ERR_PTR(ret);
2552 }
2553 pdata->rx_to_inject = (char)rx_to_inject;
2554 }
2555
2556 ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
2557 &pdata->bam_tx_ep_pipe_index);
2558 if (ret < 0) {
2559 pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
2560 return ERR_PTR(ret);
2561 }
2562
2563 if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
2564 pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
2565 pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
2566 return ERR_PTR(-EINVAL);
2567 }
2568
2569 ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
2570 &pdata->bam_rx_ep_pipe_index);
2571 if (ret < 0) {
2572 pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
2573 return ERR_PTR(ret);
2574 }
2575
2576 if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
2577 pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
2578 pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
2579 return ERR_PTR(-EINVAL);
2580 }
2581
2582 pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
2583 "tx_gpio:%d rx_gpio:%d rfr_gpio:%d cts_gpio:%d",
2584 pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
2585 pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
2586 pdata->uart_rfr_gpio);
2587
2588 return pdata;
2589}
2590
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302591
2592/**
2593 * Deallocate UART peripheral's SPS endpoint
2594 * @msm_uport - Pointer to msm_hs_port structure
2595 * @ep - Pointer to sps endpoint data structure
2596 */
2597
2598static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
2599 struct msm_hs_sps_ep_conn_data *ep)
2600{
2601 struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
2602 struct sps_connect *sps_config = &ep->config;
2603
2604 dma_free_coherent(msm_uport->uport.dev,
2605 sps_config->desc.size,
2606 &sps_config->desc.phys_base,
2607 GFP_KERNEL);
2608 sps_free_endpoint(sps_pipe_handle);
2609}
2610
2611
2612/**
2613 * Allocate UART peripheral's SPS endpoint
2614 *
2615 * This function allocates endpoint context
2616 * by calling appropriate SPS driver APIs.
2617 *
2618 * @msm_uport - Pointer to msm_hs_port structure
2619 * @ep - Pointer to sps endpoint data structure
2620 * @is_produce - 1 means Producer endpoint
2621 * - 0 means Consumer endpoint
2622 *
2623 * @return - 0 if successful else negative value
2624 */
2625
2626static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
2627 struct msm_hs_sps_ep_conn_data *ep,
2628 bool is_producer)
2629{
2630 int rc = 0;
2631 struct sps_pipe *sps_pipe_handle;
2632 struct sps_connect *sps_config = &ep->config;
2633 struct sps_register_event *sps_event = &ep->event;
2634
2635 /* Allocate endpoint context */
2636 sps_pipe_handle = sps_alloc_endpoint();
2637 if (!sps_pipe_handle) {
2638 pr_err("msm_serial_hs: sps_alloc_endpoint() failed!!\n"
2639 "is_producer=%d", is_producer);
2640 rc = -ENOMEM;
2641 goto out;
2642 }
2643
2644 /* Get default connection configuration for an endpoint */
2645 rc = sps_get_config(sps_pipe_handle, sps_config);
2646 if (rc) {
2647 pr_err("msm_serial_hs: sps_get_config() failed!!\n"
2648 "pipe_handle=0x%x rc=%d", (u32)sps_pipe_handle, rc);
2649 goto get_config_err;
2650 }
2651
2652 /* Modify the default connection configuration */
2653 if (is_producer) {
2654 /* For UART producer transfer, source is UART peripheral
 2655		whereas destination is system memory */
2656 sps_config->source = msm_uport->bam_handle;
2657 sps_config->destination = SPS_DEV_HANDLE_MEM;
2658 sps_config->mode = SPS_MODE_SRC;
2659 sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
2660 sps_config->dest_pipe_index = 0;
Saket Saurabh0dafb3c2013-04-12 11:44:04 +05302661 sps_config->options = SPS_O_DESC_DONE;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302662 } else {
2663 /* For UART consumer transfer, source is system memory
 2664		whereas destination is UART peripheral */
2665 sps_config->source = SPS_DEV_HANDLE_MEM;
2666 sps_config->destination = msm_uport->bam_handle;
2667 sps_config->mode = SPS_MODE_DEST;
2668 sps_config->src_pipe_index = 0;
2669 sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
2670 sps_config->options = SPS_O_EOT;
2671 }
2672
2673 sps_config->event_thresh = 0x10;
2674
2675 /* Allocate maximum descriptor fifo size */
2676 sps_config->desc.size = 65532;
2677 sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
2678 sps_config->desc.size,
2679 &sps_config->desc.phys_base,
2680 GFP_KERNEL);
2681 if (!sps_config->desc.base) {
2682 rc = -ENOMEM;
2683 pr_err("msm_serial_hs: dma_alloc_coherent() failed!!\n");
2684 goto get_config_err;
2685 }
2686 memset(sps_config->desc.base, 0x00, sps_config->desc.size);
2687
2688 sps_event->mode = SPS_TRIGGER_CALLBACK;
Saket Saurabh0dafb3c2013-04-12 11:44:04 +05302689
2690 if (is_producer) {
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302691 sps_event->callback = msm_hs_sps_rx_callback;
Saket Saurabh0dafb3c2013-04-12 11:44:04 +05302692 sps_event->options = SPS_O_DESC_DONE;
2693 } else {
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302694 sps_event->callback = msm_hs_sps_tx_callback;
Saket Saurabh0dafb3c2013-04-12 11:44:04 +05302695 sps_event->options = SPS_O_EOT;
2696 }
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302697
2698 sps_event->user = (void *)msm_uport;
2699
2700 /* Now save the sps pipe handle */
2701 ep->pipe_handle = sps_pipe_handle;
2702	pr_debug("msm_serial_hs: success!! %s: pipe_handle=0x%x "
2703		"desc_fifo.phys_base=0x%x\n",
2704		is_producer ? "READ" : "WRITE",
2705		(u32)sps_pipe_handle, sps_config->desc.phys_base);
2706 return 0;
2707
2708get_config_err:
2709 sps_free_endpoint(sps_pipe_handle);
2710out:
2711 return rc;
2712}
2713
2714/**
2715 * Initialize SPS HW connected with UART core
2716 *
2717 * This function registers BAM HW resources with the
2718 * SPS driver and then initializes two SPS endpoints.
2719 *
2720 * @msm_uport - Pointer to msm_hs_port structure
2721 *
2722 * @return - 0 if successful else negative value
2723 */
2724
2725static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
2726{
2727 int rc = 0;
2728 struct sps_bam_props bam = {0};
2729 u32 bam_handle;
2730
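	/*
	 * sps_phy2h() reports whether this BAM's physical address already has
	 * a handle with the SPS driver; the BAM is registered below only when
	 * no handle exists yet.
	 */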
2731 rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
2732 if (rc || !bam_handle) {
2733 bam.phys_addr = msm_uport->bam_mem;
2734 bam.virt_addr = msm_uport->bam_base;
2735 /*
2736		 * This event threshold value is only significant for BAM-to-BAM
2737 * transfer. It's ignored for BAM-to-System mode transfer.
2738 */
2739 bam.event_threshold = 0x10; /* Pipe event threshold */
2740 bam.summing_threshold = 1; /* BAM event threshold */
2741
2742		/* SPS driver will handle the UART BAM IRQ */
2743 bam.irq = (u32)msm_uport->bam_irq;
2744 bam.manage = SPS_BAM_MGR_LOCAL;
2745
2746 pr_debug("msm_serial_hs: bam physical base=0x%x\n",
2747 (u32)bam.phys_addr);
2748 pr_debug("msm_serial_hs: bam virtual base=0x%x\n",
2749 (u32)bam.virt_addr);
2750
2751 /* Register UART Peripheral BAM device to SPS driver */
2752 rc = sps_register_bam_device(&bam, &bam_handle);
2753 if (rc) {
2754 pr_err("msm_serial_hs: BAM device register failed\n");
2755 return rc;
2756 }
2757		pr_info("msm_serial_hs: BAM device registered. bam_handle=0x%x\n",
2758			bam_handle);
2759 }
2760 msm_uport->bam_handle = bam_handle;
2761
2762 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
2763 UART_SPS_PROD_PERIPHERAL);
2764 if (rc) {
2765 pr_err("%s: Failed to Init Producer BAM-pipe", __func__);
2766 goto deregister_bam;
2767 }
2768
2769 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
2770 UART_SPS_CONS_PERIPHERAL);
2771 if (rc) {
2772 pr_err("%s: Failed to Init Consumer BAM-pipe", __func__);
2773 goto deinit_ep_conn_prod;
2774 }
2775 return 0;
2776
2777deinit_ep_conn_prod:
2778 msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
2779deregister_bam:
2780 sps_deregister_bam_device(msm_uport->bam_handle);
2781 return rc;
2782}
2783
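/*
 * BLSP UART instance bookkeeping: deviceid[] marks ids that are already
 * claimed, either auto-assigned from msm_serial_hs_next_id or taken from an
 * optional "uart" alias in the device tree (see msm_hs_probe() below).
 */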
Saket Saurabh10e88b32013-02-04 15:26:34 +05302784#define BLSP_UART_NR 12
2785static int deviceid[BLSP_UART_NR] = {0};
2786static atomic_t msm_serial_hs_next_id = ATOMIC_INIT(0);
2787
Mayank Rana55046232011-03-07 10:28:42 +05302788static int __devinit msm_hs_probe(struct platform_device *pdev)
2789{
Saket Saurabh10e88b32013-02-04 15:26:34 +05302790 int ret = 0, alias_num = -1;
Mayank Rana55046232011-03-07 10:28:42 +05302791 struct uart_port *uport;
2792 struct msm_hs_port *msm_uport;
Mayank Ranaff398d02012-12-18 10:22:50 +05302793 struct resource *core_resource;
2794 struct resource *bam_resource;
Mayank Rana55046232011-03-07 10:28:42 +05302795 struct resource *resource;
Mayank Ranaff398d02012-12-18 10:22:50 +05302796 int core_irqres, bam_irqres;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002797 struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
Mayank Ranaff398d02012-12-18 10:22:50 +05302798
2799 if (pdev->dev.of_node) {
2800 dev_dbg(&pdev->dev, "device tree enabled\n");
2801 pdata = msm_hs_dt_to_pdata(pdev);
2802 if (IS_ERR(pdata))
2803 return PTR_ERR(pdata);
2804
Saket Saurabh10e88b32013-02-04 15:26:34 +05302805 if (pdev->id == -1) {
2806 pdev->id = atomic_inc_return(&msm_serial_hs_next_id)-1;
2807 deviceid[pdev->id] = 1;
2808 }
2809
2810		/* Use the alias from the device tree if present.
2811		 * The alias is an optional property.
2812		 */
2813 alias_num = of_alias_get_id(pdev->dev.of_node, "uart");
2814 if (alias_num >= 0) {
2815			/* If alias_num is between 0 and 11, check that it is
2816			 * not equal to a previously assigned pdev id. If it
2817			 * is, fail the device probe.
2818 */
2819 if (alias_num < BLSP_UART_NR) {
2820 if (deviceid[alias_num] == 0) {
2821 pdev->id = alias_num;
2822 } else {
2823 pr_err("alias_num=%d already used\n",
2824 alias_num);
2825 return -EINVAL;
2826 }
2827 } else {
2828 pdev->id = alias_num;
2829 }
2830 }
Mayank Ranaff398d02012-12-18 10:22:50 +05302831
2832 pdev->dev.platform_data = pdata;
2833 }
Mayank Rana55046232011-03-07 10:28:42 +05302834
2835 if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
Mayank Ranaff398d02012-12-18 10:22:50 +05302836		pr_err("Invalid platform device ID = %d\n", pdev->id);
Mayank Rana55046232011-03-07 10:28:42 +05302837 return -EINVAL;
2838 }
2839
2840 msm_uport = &q_uart_port[pdev->id];
2841 uport = &msm_uport->uport;
Mayank Rana55046232011-03-07 10:28:42 +05302842 uport->dev = &pdev->dev;
2843
Mayank Ranaff398d02012-12-18 10:22:50 +05302844 if (pdev->dev.of_node)
2845 msm_uport->uart_type = BLSP_HSUART;
Mayank Rana55046232011-03-07 10:28:42 +05302846
Mayank Ranaff398d02012-12-18 10:22:50 +05302847 /* Get required resources for BAM HSUART */
2848 if (is_blsp_uart(msm_uport)) {
2849 core_resource = platform_get_resource_byname(pdev,
2850 IORESOURCE_MEM, "core_mem");
2851 bam_resource = platform_get_resource_byname(pdev,
2852 IORESOURCE_MEM, "bam_mem");
2853 core_irqres = platform_get_irq_byname(pdev, "core_irq");
2854 bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002855
Mayank Ranaff398d02012-12-18 10:22:50 +05302856 if (!core_resource) {
2857 pr_err("Invalid core HSUART Resources.\n");
2858 return -ENXIO;
2859 }
2860
2861 if (!bam_resource) {
2862 pr_err("Invalid BAM HSUART Resources.\n");
2863 return -ENXIO;
2864 }
2865
2866 if (!core_irqres) {
2867 pr_err("Invalid core irqres Resources.\n");
2868 return -ENXIO;
2869 }
2870 if (!bam_irqres) {
2871 pr_err("Invalid bam irqres Resources.\n");
2872 return -ENXIO;
2873 }
2874
2875 uport->mapbase = core_resource->start;
2876
2877 uport->membase = ioremap(uport->mapbase,
2878 resource_size(core_resource));
2879 if (unlikely(!uport->membase)) {
2880 pr_err("UART Resource ioremap Failed.\n");
2881 return -ENOMEM;
2882 }
2883 msm_uport->bam_mem = bam_resource->start;
2884 msm_uport->bam_base = ioremap(msm_uport->bam_mem,
2885 resource_size(bam_resource));
2886 if (unlikely(!msm_uport->bam_base)) {
2887 pr_err("UART BAM Resource ioremap Failed.\n");
2888 iounmap(uport->membase);
2889 return -ENOMEM;
2890 }
2891
2892 uport->irq = core_irqres;
2893 msm_uport->bam_irq = bam_irqres;
2894
Mayank Rana88d49142013-01-16 17:28:53 +05302895 msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2896 if (!msm_uport->bus_scale_table) {
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302897 pr_err("BLSP UART: Bus scaling is disabled.\n");
Mayank Rana88d49142013-01-16 17:28:53 +05302898 } else {
2899 msm_uport->bus_perf_client =
2900 msm_bus_scale_register_client
2901 (msm_uport->bus_scale_table);
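			/* a client handle of 0 means registration failed */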
2902			if (!msm_uport->bus_perf_client) {
2903 pr_err("%s(): Bus client register failed.\n",
2904 __func__);
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302905 ret = -EINVAL;
Mayank Rana88d49142013-01-16 17:28:53 +05302906 goto unmap_memory;
2907 }
2908 }
Mayank Ranaff398d02012-12-18 10:22:50 +05302909 } else {
2910
2911 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2912 if (unlikely(!resource))
2913 return -ENXIO;
2914 uport->mapbase = resource->start;
2915 uport->membase = ioremap(uport->mapbase,
2916 resource_size(resource));
2917 if (unlikely(!uport->membase))
2918 return -ENOMEM;
2919
2920 uport->irq = platform_get_irq(pdev, 0);
2921 if (unlikely((int)uport->irq < 0)) {
2922 pr_err("UART IRQ Failed.\n");
2923 iounmap(uport->membase);
2924 return -ENXIO;
2925 }
2926 }
Mayank Rana55046232011-03-07 10:28:42 +05302927
Mayank Rana55046232011-03-07 10:28:42 +05302928 if (pdata == NULL)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002929 msm_uport->wakeup.irq = -1;
2930 else {
2931 msm_uport->wakeup.irq = pdata->wakeup_irq;
2932 msm_uport->wakeup.ignore = 1;
2933 msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
2934 msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
2935
Mayank Ranaff398d02012-12-18 10:22:50 +05302936 if (unlikely(msm_uport->wakeup.irq < 0)) {
2937 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302938 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302939 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002940
Mayank Ranaff398d02012-12-18 10:22:50 +05302941 if (is_blsp_uart(msm_uport)) {
2942 msm_uport->bam_tx_ep_pipe_index =
2943 pdata->bam_tx_ep_pipe_index;
2944 msm_uport->bam_rx_ep_pipe_index =
2945 pdata->bam_rx_ep_pipe_index;
2946 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002947 }
Mayank Rana55046232011-03-07 10:28:42 +05302948
Mayank Ranaff398d02012-12-18 10:22:50 +05302949 if (!is_blsp_uart(msm_uport)) {
Mayank Rana55046232011-03-07 10:28:42 +05302950
Mayank Ranaff398d02012-12-18 10:22:50 +05302951 resource = platform_get_resource_byname(pdev,
2952 IORESOURCE_DMA, "uartdm_channels");
2953 if (unlikely(!resource)) {
2954 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302955 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302956 }
2957
2958 msm_uport->dma_tx_channel = resource->start;
2959 msm_uport->dma_rx_channel = resource->end;
2960
2961 resource = platform_get_resource_byname(pdev,
2962 IORESOURCE_DMA, "uartdm_crci");
2963 if (unlikely(!resource)) {
2964 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302965 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302966 }
2967
2968 msm_uport->dma_tx_crci = resource->start;
2969 msm_uport->dma_rx_crci = resource->end;
2970 }
Mayank Rana55046232011-03-07 10:28:42 +05302971
2972 uport->iotype = UPIO_MEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002973 uport->fifosize = 64;
Mayank Rana55046232011-03-07 10:28:42 +05302974 uport->ops = &msm_hs_ops;
2975 uport->flags = UPF_BOOT_AUTOCONF;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002976 uport->uartclk = 7372800;
Mayank Rana55046232011-03-07 10:28:42 +05302977 msm_uport->imr_reg = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002978
Matt Wagantalle2522372011-08-17 14:52:21 -07002979 msm_uport->clk = clk_get(&pdev->dev, "core_clk");
Mayank Ranaff398d02012-12-18 10:22:50 +05302980 if (IS_ERR(msm_uport->clk)) {
2981 ret = PTR_ERR(msm_uport->clk);
Mayank Rana43c8baa2013-02-23 14:57:14 +05302982 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302983 }
Mayank Rana55046232011-03-07 10:28:42 +05302984
Matt Wagantalle2522372011-08-17 14:52:21 -07002985 msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002986 /*
2987 * Some configurations do not require explicit pclk control so
2988 * do not flag error on pclk get failure.
2989 */
2990 if (IS_ERR(msm_uport->pclk))
2991 msm_uport->pclk = NULL;
2992
2993 ret = clk_set_rate(msm_uport->clk, uport->uartclk);
2994 if (ret) {
2995 printk(KERN_WARNING "Error setting clock rate on UART\n");
Mayank Rana43c8baa2013-02-23 14:57:14 +05302996 goto put_clk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002997 }
2998
Mayank Ranacb589d82012-03-01 11:50:03 +05302999 msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
3000 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
3001 if (!msm_uport->hsuart_wq) {
3002 pr_err("%s(): Unable to create workqueue hsuart_wq\n",
3003 __func__);
Mayank Ranaff398d02012-12-18 10:22:50 +05303004 ret = -ENOMEM;
Mayank Rana43c8baa2013-02-23 14:57:14 +05303005 goto put_clk;
Mayank Ranacb589d82012-03-01 11:50:03 +05303006 }
3007
3008 INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303009
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303010 /* Init work for sps_disconnect in stop_rx_locked */
3011 INIT_WORK(&msm_uport->disconnect_rx_endpoint,
3012 hsuart_disconnect_rx_endpoint_work);
Mayank Ranacb589d82012-03-01 11:50:03 +05303013 mutex_init(&msm_uport->clk_mutex);
3014
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303015 /* Initialize SPS HW connected with UART core */
3016 if (is_blsp_uart(msm_uport)) {
3017 ret = msm_hs_sps_init(msm_uport);
3018 if (unlikely(ret)) {
3019 pr_err("SPS Initialization failed ! err=%d", ret);
Mayank Rana43c8baa2013-02-23 14:57:14 +05303020 goto destroy_mutex;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303021 }
3022 }
3023
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303024 msm_hs_bus_voting(msm_uport, BUS_SCALING);
3025
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003026 clk_prepare_enable(msm_uport->clk);
3027 if (msm_uport->pclk)
3028 clk_prepare_enable(msm_uport->pclk);
3029
Mayank Rana55046232011-03-07 10:28:42 +05303030 ret = uartdm_init_port(uport);
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003031	if (unlikely(ret))
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303032		goto err_clock;
Mayank Rana55046232011-03-07 10:28:42 +05303034
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003035 /* configure the CR Protection to Enable */
3036 msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003037
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003038
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003039 /*
3040 * Enable Command register protection before going ahead as this hw
3041 * configuration makes sure that issued cmd to CR register gets complete
3042 * before next issued cmd start. Hence mb() requires here.
3043 */
3044 mb();
Mayank Rana55046232011-03-07 10:28:42 +05303045
3046 msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
3047 hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
3048 HRTIMER_MODE_REL);
3049 msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
3050 msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */
3051
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003052 ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
3053 if (unlikely(ret))
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303054 goto err_clock;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003055
3056 msm_serial_debugfs_init(msm_uport, pdev->id);
3057
Mayank Rana55046232011-03-07 10:28:42 +05303058 uport->line = pdev->id;
Saket Saurabh51690e52012-08-17 14:17:46 +05303059 if (pdata != NULL && pdata->userid && pdata->userid <= UARTDM_NR)
3060 uport->line = pdata->userid;
Mayank Ranaff398d02012-12-18 10:22:50 +05303061 ret = uart_add_one_port(&msm_hs_driver, uport);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303062 if (!ret) {
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303063 msm_hs_bus_voting(msm_uport, BUS_RESET);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303064 clk_disable_unprepare(msm_uport->clk);
3065 if (msm_uport->pclk)
3066 clk_disable_unprepare(msm_uport->pclk);
Mayank Ranaff398d02012-12-18 10:22:50 +05303067 return ret;
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303068 }
Mayank Ranaff398d02012-12-18 10:22:50 +05303069
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303070err_clock:
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303071
3072 msm_hs_bus_voting(msm_uport, BUS_RESET);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303073 clk_disable_unprepare(msm_uport->clk);
3074 if (msm_uport->pclk)
3075 clk_disable_unprepare(msm_uport->pclk);
Mayank Rana43c8baa2013-02-23 14:57:14 +05303076
3077destroy_mutex:
3078 mutex_destroy(&msm_uport->clk_mutex);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303079 destroy_workqueue(msm_uport->hsuart_wq);
Mayank Rana43c8baa2013-02-23 14:57:14 +05303080
3081put_clk:
3082 if (msm_uport->pclk)
3083 clk_put(msm_uport->pclk);
3084
3085 if (msm_uport->clk)
3086 clk_put(msm_uport->clk);
3087
3088deregister_bus_client:
3089 if (is_blsp_uart(msm_uport))
3090 msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
Mayank Ranaff398d02012-12-18 10:22:50 +05303091unmap_memory:
3092 iounmap(uport->membase);
3093 if (is_blsp_uart(msm_uport))
3094 iounmap(msm_uport->bam_base);
3095
3096 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303097}
3098
3099static int __init msm_serial_hs_init(void)
3100{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003101 int ret;
3102 int i;
Mayank Rana55046232011-03-07 10:28:42 +05303103
3104 /* Init all UARTS as non-configured */
3105 for (i = 0; i < UARTDM_NR; i++)
3106 q_uart_port[i].uport.type = PORT_UNKNOWN;
3107
Mayank Rana55046232011-03-07 10:28:42 +05303108 ret = uart_register_driver(&msm_hs_driver);
3109 if (unlikely(ret)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003110		printk(KERN_ERR "%s failed to load\n", __func__);
3111 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303112 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003113 debug_base = debugfs_create_dir("msm_serial_hs", NULL);
3114 if (IS_ERR_OR_NULL(debug_base))
3115 pr_info("msm_serial_hs: Cannot create debugfs dir\n");
Mayank Rana55046232011-03-07 10:28:42 +05303116
3117 ret = platform_driver_register(&msm_serial_hs_platform_driver);
3118 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003119		printk(KERN_ERR "%s failed to load\n", __func__);
3120 debugfs_remove_recursive(debug_base);
3121 uart_unregister_driver(&msm_hs_driver);
3122 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303123 }
3124
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003125 printk(KERN_INFO "msm_serial_hs module loaded\n");
Mayank Rana55046232011-03-07 10:28:42 +05303126 return ret;
3127}
Mayank Rana55046232011-03-07 10:28:42 +05303128
3129/*
3130 * Called by the upper layer when port is closed.
3131 * - Disables the port
3132 * - Unhook the ISR
3133 */
3134static void msm_hs_shutdown(struct uart_port *uport)
3135{
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303136 int ret;
3137 unsigned int data;
3138 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05303139 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana40836782012-11-16 14:45:47 +05303140 struct platform_device *pdev = to_platform_device(uport->dev);
3141 const struct msm_serial_hs_platform_data *pdata =
3142 pdev->dev.platform_data;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303143 struct msm_hs_tx *tx = &msm_uport->tx;
3144 struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05303145
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303146 if (msm_uport->tx.dma_in_flight) {
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303147 if (!is_blsp_uart(msm_uport)) {
3148 spin_lock_irqsave(&uport->lock, flags);
3149 /* disable UART TX interface to DM */
3150 data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
3151 data &= ~UARTDM_TX_DM_EN_BMSK;
3152 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
3153 /* turn OFF UART Transmitter */
3154 msm_hs_write(uport, UARTDM_CR_ADDR,
3155 UARTDM_CR_TX_DISABLE_BMSK);
3156 /* reset UART TX */
3157 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
3158 /* reset UART TX Error */
3159 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX_ERROR);
3160 msm_uport->tx.flush = FLUSH_STOP;
3161 spin_unlock_irqrestore(&uport->lock, flags);
3162 /* discard flush */
3163 msm_dmov_flush(msm_uport->dma_tx_channel, 0);
3164 ret = wait_event_timeout(msm_uport->tx.wait,
3165 msm_uport->tx.flush == FLUSH_SHUTDOWN, 100);
3166 if (!ret)
3167 pr_err("%s():HSUART TX Stalls.\n", __func__);
3168 } else {
3169 /* BAM Disconnect for TX */
Saket Saurabh1bde0862013-04-12 15:47:36 +05303170 ret = sps_disconnect(sps_pipe_handle);
3171 if (ret)
3172 pr_err("%s(): sps_disconnect failed\n",
3173 __func__);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303174 }
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303175 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003176 tasklet_kill(&msm_uport->tx.tlet);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303177 BUG_ON(msm_uport->rx.flush < FLUSH_STOP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003178 wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
3179 tasklet_kill(&msm_uport->rx.tlet);
3180 cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
Mayank Ranacb589d82012-03-01 11:50:03 +05303181 flush_workqueue(msm_uport->hsuart_wq);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003182 pm_runtime_disable(uport->dev);
3183 pm_runtime_set_suspended(uport->dev);
Mayank Rana55046232011-03-07 10:28:42 +05303184
3185 /* Disable the transmitter */
3186 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
3187 /* Disable the receiver */
3188 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);
3189
Mayank Rana55046232011-03-07 10:28:42 +05303190 msm_uport->imr_reg = 0;
3191 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003192 /*
3193 * Complete all device write before actually disabling uartclk.
3194 * Hence mb() requires here.
3195 */
3196 mb();
Mayank Rana88d49142013-01-16 17:28:53 +05303197
3198 /* Reset PNOC Bus Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303199 msm_hs_bus_voting(msm_uport, BUS_RESET);
Mayank Rana88d49142013-01-16 17:28:53 +05303200
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003201 if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
Mayank Ranacb589d82012-03-01 11:50:03 +05303202 /* to balance clk_state */
3203 clk_disable_unprepare(msm_uport->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003204 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05303205 clk_disable_unprepare(msm_uport->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003206 wake_unlock(&msm_uport->dma_wake_lock);
3207 }
Mayank Rana55046232011-03-07 10:28:42 +05303208
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303209 msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
Mayank Rana55046232011-03-07 10:28:42 +05303210 dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
3211 UART_XMIT_SIZE, DMA_TO_DEVICE);
3212
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003213 if (use_low_power_wakeup(msm_uport))
3214 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
Mayank Rana55046232011-03-07 10:28:42 +05303215
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003216 /* Free the interrupt */
3217 free_irq(uport->irq, msm_uport);
3218 if (use_low_power_wakeup(msm_uport))
3219 free_irq(msm_uport->wakeup.irq, msm_uport);
Mayank Rana40836782012-11-16 14:45:47 +05303220
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05303221 if (is_blsp_uart(msm_uport)) {
3222 msm_hs_unconfig_uart_gpios(uport);
3223 } else {
3224 if (pdata && pdata->gpio_config)
3225 if (pdata->gpio_config(0))
3226 dev_err(uport->dev, "GPIO config error\n");
3227 }
Mayank Rana55046232011-03-07 10:28:42 +05303228}
3229
3230static void __exit msm_serial_hs_exit(void)
3231{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003232 printk(KERN_INFO "msm_serial_hs module removed\n");
Mayank Rana17e0e1a2012-04-07 02:10:33 +05303233 debugfs_remove_recursive(debug_base);
Mayank Rana55046232011-03-07 10:28:42 +05303234 platform_driver_unregister(&msm_serial_hs_platform_driver);
3235 uart_unregister_driver(&msm_hs_driver);
3236}
Mayank Rana55046232011-03-07 10:28:42 +05303237
Mayank Rana55046232011-03-07 10:28:42 +05303238static int msm_hs_runtime_idle(struct device *dev)
3239{
3240 /*
3241	 * Returning success from the idle callback causes runtime suspend
3242	 * to be called.
3243 */
3244 return 0;
3245}
3246
3247static int msm_hs_runtime_resume(struct device *dev)
3248{
3249	struct platform_device *pdev = to_platform_device(dev);
3251 struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
Mayank Rana55046232011-03-07 10:28:42 +05303252 msm_hs_request_clock_on(&msm_uport->uport);
3253 return 0;
3254}
3255
3256static int msm_hs_runtime_suspend(struct device *dev)
3257{
3258	struct platform_device *pdev = to_platform_device(dev);
3260 struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
Mayank Rana55046232011-03-07 10:28:42 +05303261 msm_hs_request_clock_off(&msm_uport->uport);
3262 return 0;
3263}
Mayank Rana55046232011-03-07 10:28:42 +05303264
3265static const struct dev_pm_ops msm_hs_dev_pm_ops = {
3266 .runtime_suspend = msm_hs_runtime_suspend,
3267 .runtime_resume = msm_hs_runtime_resume,
3268 .runtime_idle = msm_hs_runtime_idle,
3269};
3270
Mayank Ranaff398d02012-12-18 10:22:50 +05303271static struct of_device_id msm_hs_match_table[] = {
3272 { .compatible = "qcom,msm-hsuart-v14" },
3273 {}
3274};
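/*
 * Illustrative (hypothetical) device tree node matched by the table above.
 * The resource and interrupt names follow the *_byname lookups in
 * msm_hs_probe(); the unit address and cell values are placeholders only:
 *
 *	uart@f991e000 {
 *		compatible = "qcom,msm-hsuart-v14";
 *		reg = <...>, <...>;
 *		reg-names = "core_mem", "bam_mem";
 *		interrupts = <...>, <...>;
 *		interrupt-names = "core_irq", "bam_irq";
 *	};
 */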
3275
Mayank Rana55046232011-03-07 10:28:42 +05303276static struct platform_driver msm_serial_hs_platform_driver = {
Mayank Rana17e0e1a2012-04-07 02:10:33 +05303277 .probe = msm_hs_probe,
Mayank Rana55046232011-03-07 10:28:42 +05303278 .remove = __devexit_p(msm_hs_remove),
3279 .driver = {
3280 .name = "msm_serial_hs",
Mayank Rana55046232011-03-07 10:28:42 +05303281 .pm = &msm_hs_dev_pm_ops,
Mayank Ranaff398d02012-12-18 10:22:50 +05303282 .of_match_table = msm_hs_match_table,
Mayank Rana55046232011-03-07 10:28:42 +05303283 },
3284};
3285
3286static struct uart_driver msm_hs_driver = {
3287 .owner = THIS_MODULE,
3288 .driver_name = "msm_serial_hs",
3289 .dev_name = "ttyHS",
3290 .nr = UARTDM_NR,
3291 .cons = 0,
3292};
3293
3294static struct uart_ops msm_hs_ops = {
3295 .tx_empty = msm_hs_tx_empty,
3296 .set_mctrl = msm_hs_set_mctrl_locked,
3297 .get_mctrl = msm_hs_get_mctrl_locked,
3298 .stop_tx = msm_hs_stop_tx_locked,
3299 .start_tx = msm_hs_start_tx_locked,
3300 .stop_rx = msm_hs_stop_rx_locked,
3301 .enable_ms = msm_hs_enable_ms_locked,
3302 .break_ctl = msm_hs_break_ctl,
3303 .startup = msm_hs_startup,
3304 .shutdown = msm_hs_shutdown,
3305 .set_termios = msm_hs_set_termios,
Mayank Rana55046232011-03-07 10:28:42 +05303306 .type = msm_hs_type,
3307 .config_port = msm_hs_config_port,
3308 .release_port = msm_hs_release_port,
3309 .request_port = msm_hs_request_port,
Saket Saurabhce394102012-10-29 19:51:28 +05303310 .flush_buffer = msm_hs_flush_buffer,
Mayank Rana55046232011-03-07 10:28:42 +05303311};
3312
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003313module_init(msm_serial_hs_init);
3314module_exit(msm_serial_hs_exit);
Mayank Rana55046232011-03-07 10:28:42 +05303315MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
3316MODULE_VERSION("1.2");
3317MODULE_LICENSE("GPL v2");