/* drivers/serial/msm_serial_hs.c
 *
 * MSM 7k High speed uart driver
 *
 * Copyright (c) 2008 Google Inc.
 * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
 * Modified: Nick Pelly <npelly@google.com>
 *
 * All source code in this file is licensed under the following license
 * except where indicated.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * Has optional support for uart power management independent of linux
 * suspend/resume:
 *
 * RX wakeup.
 * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
 * UART RX pin). This should only be used if there is not a wakeup
 * GPIO on the UART CTS, and the first RX byte is known (for example, with the
 * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
 * always be lost. RTS will be asserted even while the UART is off in this mode
 * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
 */

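/*
 * Illustrative board-file sketch (not part of this driver): one way a
 * platform might describe the RX-wakeup mode above. rx_wakeup_irq is the
 * field named in the comment above; inject_rx_on_wakeup and rx_to_inject
 * mirror this driver's internal struct msm_hs_wakeup and are assumed, not
 * verified, platform-data field names. 0x32 (the HCILL wake-up indication)
 * is used purely as an example first byte.
 *
 *	static struct msm_serial_hs_platform_data bt_uart_pdata = {
 *		.rx_wakeup_irq		= MSM_GPIO_TO_INT(45),
 *		.inject_rx_on_wakeup	= 1,
 *		.rx_to_inject		= 0x32,
 *	};
 */
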
#include <linux/module.h>

#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/tty_flip.h>
#include <linux/wait.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <asm/atomic.h>
#include <asm/irq.h>

#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/sps.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_bus.h>

#include "msm_serial_hs_hwreg.h"
#define UART_SPS_CONS_PERIPHERAL 0
#define UART_SPS_PROD_PERIPHERAL 1

static int hs_serial_debug_mask = 1;
module_param_named(debug_mask, hs_serial_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
/*
 * There are three different kinds of UART core available on MSM:
 * High Speed UART (i.e. legacy HSUART), GSBI-based HSUART
 * and BLSP-based HSUART.
 */
enum uart_core_type {
	LEGACY_HSUART,
	GSBI_HSUART,
	BLSP_HSUART,
};

enum flush_reason {
	FLUSH_NONE,
	FLUSH_DATA_READY,
	FLUSH_DATA_INVALID, /* values after this indicate invalid data */
	FLUSH_IGNORE = FLUSH_DATA_INVALID,
	FLUSH_STOP,
	FLUSH_SHUTDOWN,
};

enum msm_hs_clk_states_e {
	MSM_HS_CLK_PORT_OFF,	/* port not in use */
	MSM_HS_CLK_OFF,		/* clock disabled */
	MSM_HS_CLK_REQUEST_OFF,	/* disable after TX and RX flushed */
	MSM_HS_CLK_ON,		/* clock enabled */
};

/* Track the forced RXSTALE flush during the clock off sequence.
 * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
enum msm_hs_clk_req_off_state_e {
	CLK_REQ_OFF_START,
	CLK_REQ_OFF_RXSTALE_ISSUED,
	CLK_REQ_OFF_FLUSH_ISSUED,
	CLK_REQ_OFF_RXSTALE_FLUSHED,
};

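/*
 * Reading of the two state machines above (a summary, not new behaviour):
 * a clock-off request moves clk_state to MSM_HS_CLK_REQUEST_OFF and walks
 * clk_req_off_state through CLK_REQ_OFF_START -> CLK_REQ_OFF_RXSTALE_ISSUED
 * -> CLK_REQ_OFF_FLUSH_ISSUED -> CLK_REQ_OFF_RXSTALE_FLUSHED; only once the
 * forced RXSTALE flush has completed may clk_state drop to MSM_HS_CLK_OFF.
 */
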
/* SPS data structures to support HSUART with BAM
 * @sps_pipe - This struct defines a BAM pipe descriptor
 * @sps_connect - This struct defines a connection's end point
 * @sps_register - This struct defines event registration parameters
 */
struct msm_hs_sps_ep_conn_data {
	struct sps_pipe *pipe_handle;
	struct sps_connect config;
	struct sps_register_event event;
};

struct msm_hs_tx {
	unsigned int tx_ready_int_en;	/* ok to dma more tx */
	unsigned int dma_in_flight;	/* tx dma in progress */
	enum flush_reason flush;
	wait_queue_head_t wait;
	struct msm_dmov_cmd xfer;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	dma_addr_t mapped_cmd_ptr_ptr;
	int tx_count;
	dma_addr_t dma_base;
	struct tasklet_struct tlet;
	struct msm_hs_sps_ep_conn_data cons;
};

struct msm_hs_rx {
	enum flush_reason flush;
	struct msm_dmov_cmd xfer;
	dma_addr_t cmdptr_dmaaddr;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	wait_queue_head_t wait;
	dma_addr_t rbuffer;
	unsigned char *buffer;
	unsigned int buffer_pending;
	struct dma_pool *pool;
	struct wake_lock wake_lock;
	struct delayed_work flip_insert_work;
	struct tasklet_struct tlet;
	struct msm_hs_sps_ep_conn_data prod;
};

enum buffer_states {
	NONE_PENDING = 0x0,
	FIFO_OVERRUN = 0x1,
	PARITY_ERROR = 0x2,
	CHARS_NORMAL = 0x4,
};

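/*
 * Note on rx.buffer_pending (derived from how msm_serial_hs_rx_tlet() and
 * flip_insert_work() below use it, not from a documented format): the low
 * bits carry the buffer_states flags above, and a CHARS_NORMAL entry also
 * packs the offset of the not-yet-pushed data and the number of characters
 * still owed to the tty layer into the higher bit fields.
 */
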
/* optional low power wakeup, typically on a GPIO RX irq */
struct msm_hs_wakeup {
	int irq;		/* < 0 indicates low power wakeup disabled */
	unsigned char ignore;	/* bool */

	/* bool: inject char into rx tty on wakeup */
	unsigned char inject_rx;
	char rx_to_inject;
};

struct msm_hs_port {
	struct uart_port uport;
	unsigned long imr_reg;	/* shadow value of UARTDM_IMR */
	struct clk *clk;
	struct clk *pclk;
	struct msm_hs_tx tx;
	struct msm_hs_rx rx;
	/* GSBI uarts have to do additional writes to the GSBI memory block
	 * and the top control status block. The following pointer keeps a
	 * handle to the GSBI block.
	 */
	unsigned char __iomem *mapped_gsbi;
	int dma_tx_channel;
	int dma_rx_channel;
	int dma_tx_crci;
	int dma_rx_crci;
	struct hrtimer clk_off_timer;	/* to poll TXEMT before clock off */
	ktime_t clk_off_delay;
	enum msm_hs_clk_states_e clk_state;
	enum msm_hs_clk_req_off_state_e clk_req_off_state;

	struct msm_hs_wakeup wakeup;
	struct wake_lock dma_wake_lock;	/* held while any DMA active */

	struct dentry *loopback_dir;
	struct work_struct clock_off_w;	/* work for actual clock off */
	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
	struct mutex clk_mutex; /* mutex to guard against clock off/clock on */
	struct work_struct disconnect_rx_endpoint; /* disconnect rx_endpoint */
	bool tty_flush_receive;
	enum uart_core_type uart_type;
	u32 bam_handle;
	resource_size_t bam_mem;
	int bam_irq;
	unsigned char __iomem *bam_base;
	unsigned int bam_tx_ep_pipe_index;
	unsigned int bam_rx_ep_pipe_index;
	/* struct sps_event_notify is an argument passed when triggering a
	 * callback event object registered for an SPS connection end point.
	 */
	struct sps_event_notify notify;
	/* bus client handle */
	u32 bus_perf_client;
	/* bus scaling data required by the BLSP UART */
	struct msm_bus_scale_pdata *bus_scale_table;
	bool rx_discard_flush_issued;
	int rx_count_callback;
};

#define MSM_UARTDM_BURST_SIZE 16	/* DM burst size (in bytes) */
#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512
#define RETRY_TIMEOUT 5
#define UARTDM_NR 256
#define BAM_PIPE_MIN 0
#define BAM_PIPE_MAX 11
#define BUS_SCALING 1
#define BUS_RESET 0
#define RX_FLUSH_COMPLETE_TIMEOUT 300	/* In jiffies */

static struct dentry *debug_base;
static struct msm_hs_port q_uart_port[UARTDM_NR];
static struct platform_driver msm_serial_hs_platform_driver;
static struct uart_driver msm_hs_driver;
static struct uart_ops msm_hs_ops;
static void msm_hs_start_rx_locked(struct uart_port *uport);
static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr);
static void flip_insert_work(struct work_struct *work);

#define UARTDM_TO_MSM(uart_port) \
	container_of((uart_port), struct msm_hs_port, uport)

static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	int state = 1;
	enum msm_hs_clk_states_e clk_state;
	unsigned long flags;

	struct platform_device *pdev = container_of(dev, struct
						    platform_device, dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	clk_state = msm_uport->clk_state;
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);

	if (clk_state <= MSM_HS_CLK_OFF)
		state = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", state);
}

static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int state;
	struct platform_device *pdev = container_of(dev, struct
						    platform_device, dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	state = buf[0] - '0';
	switch (state) {
	case 0: {
		msm_hs_request_clock_off(&msm_uport->uport);
		break;
	}
	case 1: {
		msm_hs_request_clock_on(&msm_uport->uport);
		break;
	}
	default: {
		return -EINVAL;
	}
	}
	return count;
}

static DEVICE_ATTR(clock, S_IWUSR | S_IRUGO, show_clock, set_clock);

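/*
 * Usage sketch for the "clock" attribute above, from user space. The exact
 * sysfs path depends on the board and on pdev->id, so the path below is an
 * assumption, not a guarantee:
 *
 *	cat /sys/devices/platform/msm_serial_hs.0/clock       (1 = clocks on)
 *	echo 0 > /sys/devices/platform/msm_serial_hs.0/clock  (request clock off)
 *	echo 1 > /sys/devices/platform/msm_serial_hs.0/clock  (request clock on)
 *
 * set_clock() forwards these writes to msm_hs_request_clock_off()/on().
 */
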
static inline unsigned int use_low_power_wakeup(struct msm_hs_port *msm_uport)
{
	return (msm_uport->wakeup.irq > 0);
}

static inline int is_gsbi_uart(struct msm_hs_port *msm_uport)
{
	/* assume gsbi uart if gsbi resource found in pdata */
	return ((msm_uport->mapped_gsbi != NULL));
}

static unsigned int is_blsp_uart(struct msm_hs_port *msm_uport)
{
	return (msm_uport->uart_type == BLSP_HSUART);
}

static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
{
	int ret;

	if (is_blsp_uart(msm_uport) && msm_uport->bus_perf_client) {
		pr_debug("Bus voting:%d\n", vote);
		ret = msm_bus_scale_client_update_request(
				msm_uport->bus_perf_client, vote);
		if (ret)
			pr_err("%s(): Failed for Bus voting: %d\n",
							__func__, vote);
	}
}

static inline unsigned int msm_hs_read(struct uart_port *uport,
				       unsigned int offset)
{
	return readl_relaxed(uport->membase + offset);
}

static inline void msm_hs_write(struct uart_port *uport, unsigned int offset,
				unsigned int value)
{
	writel_relaxed(value, uport->membase + offset);
}

static void msm_hs_release_port(struct uart_port *port)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *gsbi_resource;
	resource_size_t size;

	if (is_gsbi_uart(msm_uport)) {
		iowrite32(GSBI_PROTOCOL_IDLE, msm_uport->mapped_gsbi +
			  GSBI_CONTROL_ADDR);
		gsbi_resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     "gsbi_resource");
		if (unlikely(!gsbi_resource))
			return;

		size = resource_size(gsbi_resource);
		release_mem_region(gsbi_resource->start, size);
		iounmap(msm_uport->mapped_gsbi);
		msm_uport->mapped_gsbi = NULL;
	}
}

static int msm_hs_request_port(struct uart_port *port)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *gsbi_resource;
	resource_size_t size;

	gsbi_resource = platform_get_resource_byname(pdev,
						     IORESOURCE_MEM,
						     "gsbi_resource");
	if (gsbi_resource) {
		size = resource_size(gsbi_resource);
		if (unlikely(!request_mem_region(gsbi_resource->start, size,
						 "msm_serial_hs")))
			return -EBUSY;
		msm_uport->mapped_gsbi = ioremap(gsbi_resource->start,
						 size);
		if (!msm_uport->mapped_gsbi) {
			release_mem_region(gsbi_resource->start, size);
			return -EBUSY;
		}
	}
	/* no gsbi uart */
	return 0;
}

static int msm_serial_loopback_enable_set(void *data, u64 val)
{
	struct msm_hs_port *msm_uport = data;
	struct uart_port *uport = &(msm_uport->uport);
	unsigned long flags;
	int ret = 0;

	msm_hs_bus_voting(msm_uport, BUS_SCALING);

	clk_prepare_enable(msm_uport->clk);
	if (msm_uport->pclk)
		clk_prepare_enable(msm_uport->pclk);

	if (val) {
		spin_lock_irqsave(&uport->lock, flags);
		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
		if (is_blsp_uart(msm_uport))
			ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
				UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
		else
			ret |= UARTDM_MR2_LOOP_MODE_BMSK;
		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
		spin_unlock_irqrestore(&uport->lock, flags);
	} else {
		spin_lock_irqsave(&uport->lock, flags);
		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
		if (is_blsp_uart(msm_uport))
			ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
				UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
		else
			ret &= ~UARTDM_MR2_LOOP_MODE_BMSK;
		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
		spin_unlock_irqrestore(&uport->lock, flags);
	}
	/* Calling CLOCK API. Hence an mb() is required here. */
	mb();
	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);

	msm_hs_bus_voting(msm_uport, BUS_RESET);
	return 0;
}

static int msm_serial_loopback_enable_get(void *data, u64 *val)
{
	struct msm_hs_port *msm_uport = data;
	struct uart_port *uport = &(msm_uport->uport);
	unsigned long flags;
	int ret = 0;

	msm_hs_bus_voting(msm_uport, BUS_SCALING);

	clk_prepare_enable(msm_uport->clk);
	if (msm_uport->pclk)
		clk_prepare_enable(msm_uport->pclk);

	spin_lock_irqsave(&uport->lock, flags);
	ret = msm_hs_read(&msm_uport->uport, UARTDM_MR2_ADDR);
	spin_unlock_irqrestore(&uport->lock, flags);

	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);

	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;

	msm_hs_bus_voting(msm_uport, BUS_RESET);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
			msm_serial_loopback_enable_set, "%llu\n");

/*
 * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
 * Writing 1 turns on internal loopback mode in HW. Useful for automation
 * test scripts.
 * Writing 0 disables the internal loopback mode. Default is disabled.
 */
static void __devinit msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
					      int id)
{
	char node_name[15];

	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
	msm_uport->loopback_dir = debugfs_create_file(node_name,
						      S_IRUGO | S_IWUSR,
						      debug_base,
						      msm_uport,
						      &loopback_enable_fops);

	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
		pr_err("%s(): Cannot create loopback.%d debug entry",
		       __func__, id);
}

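/*
 * Usage sketch for the loopback node above, assuming debugfs is mounted at
 * /sys/kernel/debug and the port id is 0:
 *
 *	echo 1 > /sys/kernel/debug/msm_serial_hs/loopback.0   (enable loopback)
 *	echo 0 > /sys/kernel/debug/msm_serial_hs/loopback.0   (disable it)
 *	cat /sys/kernel/debug/msm_serial_hs/loopback.0        (read back 0 or 1)
 */
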
static int __devexit msm_hs_remove(struct platform_device *pdev)
{
	struct msm_hs_port *msm_uport;
	struct device *dev;

	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
		printk(KERN_ERR "Invalid platform device ID = %d\n", pdev->id);
		return -EINVAL;
	}

	msm_uport = &q_uart_port[pdev->id];
	dev = msm_uport->uport.dev;

	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
	debugfs_remove(msm_uport->loopback_dir);

	dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);
	dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
		      msm_uport->rx.rbuffer);
	dma_pool_destroy(msm_uport->rx.pool);

	dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);

	wake_lock_destroy(&msm_uport->rx.wake_lock);
	wake_lock_destroy(&msm_uport->dma_wake_lock);
	destroy_workqueue(msm_uport->hsuart_wq);
	mutex_destroy(&msm_uport->clk_mutex);

	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
	clk_put(msm_uport->clk);
	if (msm_uport->pclk)
		clk_put(msm_uport->pclk);

	/* Free the tx resources */
	kfree(msm_uport->tx.command_ptr);
	kfree(msm_uport->tx.command_ptr_ptr);

	/* Free the rx resources */
	kfree(msm_uport->rx.command_ptr);
	kfree(msm_uport->rx.command_ptr_ptr);

	iounmap(msm_uport->uport.membase);

	return 0;
}

static int msm_hs_init_clk(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Set up the MREG/NREG/DREG/MNDREG */
	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
	if (ret) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		return ret;
	}

	ret = clk_prepare_enable(msm_uport->clk);
	if (ret) {
		printk(KERN_ERR "Error could not turn on UART clk\n");
		return ret;
	}
	if (msm_uport->pclk) {
		ret = clk_prepare_enable(msm_uport->pclk);
		if (ret) {
			clk_disable_unprepare(msm_uport->clk);
			dev_err(uport->dev,
				"Error could not turn on UART pclk\n");
			return ret;
		}
	}

	msm_uport->clk_state = MSM_HS_CLK_ON;
	return 0;
}

/* Connect a UART peripheral's SPS endpoint (consumer endpoint)
 *
 * Also registers an SPS callback function for the consumer
 * process with the SPS driver
 *
 * @uport - Pointer to uart uport structure
 *
 * @return - 0 if successful else negative value.
 *
 */
static int msm_hs_spsconnect_tx(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
	struct sps_connect *sps_config = &tx->cons.config;
	struct sps_register_event *sps_event = &tx->cons.event;

	/* Establish connection between peripheral and memory endpoint */
	ret = sps_connect(sps_pipe_handle, sps_config);
	if (ret) {
		pr_err("msm_serial_hs: sps_connect() failed for tx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		return ret;
	}
	/* Register callback event for EOT (End of transfer) event. */
	ret = sps_register_event(sps_pipe_handle, sps_event);
	if (ret) {
		pr_err("msm_serial_hs: sps_register_event() failed for tx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		goto reg_event_err;
	}
	return 0;

reg_event_err:
	sps_disconnect(sps_pipe_handle);
	return ret;
}

/* Connect a UART peripheral's SPS endpoint (producer endpoint)
 *
 * Also registers an SPS callback function for the producer
 * process with the SPS driver
 *
 * @uport - Pointer to uart uport structure
 *
 * @return - 0 if successful else negative value.
 *
 */
static int msm_hs_spsconnect_rx(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
	struct sps_connect *sps_config = &rx->prod.config;
	struct sps_register_event *sps_event = &rx->prod.event;

	/* Establish connection between peripheral and memory endpoint */
	ret = sps_connect(sps_pipe_handle, sps_config);
	if (ret) {
		pr_err("msm_serial_hs: sps_connect() failed for rx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		return ret;
	}
	/* Register callback event for EOT (End of transfer) event. */
	ret = sps_register_event(sps_pipe_handle, sps_event);
	if (ret) {
		pr_err("msm_serial_hs: sps_register_event() failed for rx!!\n"
		       "pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		goto reg_event_err;
	}
	return 0;

reg_event_err:
	sps_disconnect(sps_pipe_handle);
	return ret;
}

/*
 * Programs the UARTDM_CSR register with the correct bit rate.
 *
 * Interrupts should be disabled before we are called, as
 * we modify the baud rate settings.
 * Also sets the receive stale interrupt level, dependent on the bit rate.
 * The goal is to have around 8 ms before indicating stale:
 * roundup(((bit rate * .008) / 10) + 1)
 */
static void msm_hs_set_bps_locked(struct uart_port *uport,
				  unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	switch (bps) {
	case 300:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x00);
		rxstale = 1;
		break;
	case 600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x11);
		rxstale = 1;
		break;
	case 1200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x22);
		rxstale = 1;
		break;
	case 2400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x33);
		rxstale = 1;
		break;
	case 4800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x44);
		rxstale = 1;
		break;
	case 9600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x55);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x66);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x77);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x88);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
		rxstale = 16;
		break;
	case 76800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
		rxstale = 31;
		break;
	case 230400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
		rxstale = 31;
		break;
	case 460800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	case 4000000:
	case 3686400:
	case 3200000:
	case 3500000:
	case 3000000:
	case 2500000:
	case 1500000:
	case 1152000:
	case 1000000:
	case 921600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}
	/*
	 * The uart baud rate depends on the CSR and MND values.
	 * We update CSR above and then call clk_set_rate, which
	 * updates the MND values. Hence a dsb is required here.
	 */
	mb();
	if (bps > 460800) {
		uport->uartclk = bps * 16;
	} else {
		uport->uartclk = 7372800;
	}

	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		WARN_ON(1);
	}

	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
	/*
	 * It is suggested to reset the transmitter and receiver after
	 * changing any protocol configuration. Here the baud rate and stale
	 * timeout are being updated, hence reset transmitter and receiver.
	 */
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
}

static void msm_hs_set_std_bps_locked(struct uart_port *uport,
				      unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;

	switch (bps) {
	case 9600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xdd);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}

	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UARTDM_IPR_ADDR, data);
}

/*
 * termios : new ktermios
 * oldtermios: old ktermios previous setting
 *
 * Configure the serial port
 */
static void msm_hs_set_termios(struct uart_port *uport,
			       struct ktermios *termios,
			       struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	int ret;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	mutex_lock(&msm_uport->clk_mutex);
	msm_hs_write(uport, UARTDM_IMR_ADDR, 0);

	/*
	 * Disable the Rx channel of UARTDM.
	 * A DMA Rx stall happens if enqueue and flush of an Rx command happen
	 * concurrently. Hence before changing the baud rate/protocol
	 * configuration and sending the flush command to the ADM, disable the
	 * Rx channel of UARTDM.
	 * Note: should not reset the receiver here immediately as it is not
	 * suggested to do disable/reset or reset/disable at the same time.
	 */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport)) {
		/* Disable UARTDM RX BAM Interface */
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	} else {
		data &= ~UARTDM_RX_DM_EN_BMSK;
	}

	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* 300 is the minimum baud rate supported by the driver */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);

	/* Temporary remapping 200 BAUD to 3.2 mbps */
	if (bps == 200)
		bps = 3200000;

	uport->uartclk = clk_get_rate(msm_uport->clk);
	if (!uport->uartclk)
		msm_hs_set_std_bps_locked(uport, bps);
	else
		msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UARTDM_MR2_ADDR);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (PARENB == (c_cflag & PARENB)) {
		if (PARODD == (c_cflag & PARODD)) {
			data |= ODD_PARITY;
		} else if (CMSPAR == (c_cflag & CMSPAR)) {
			data |= SPACE_PARITY;
		} else {
			data |= EVEN_PARITY;
		}
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;

	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UARTDM_MR2_ADDR, data);

	/* Configure HW flow control */
	data = msm_hs_read(uport, UARTDM_MR1_ADDR);

	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);

	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
	}

	msm_hs_write(uport, UARTDM_MR1_ADDR, data);

	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;

	uport->read_status_mask = (termios->c_cflag & CREAD);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);

	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		msm_uport->rx.flush = FLUSH_IGNORE;
		/*
		 * Before using dmov APIs make sure that the
		 * previous writels are completed. Hence a
		 * dsb is required here.
		 */
		mb();
		if (is_blsp_uart(msm_uport)) {
			sps_disconnect(sps_pipe_handle);
			msm_hs_spsconnect_rx(uport);
			msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
		} else {
			msm_uport->rx_discard_flush_issued = true;
			/* do discard flush */
			msm_dmov_flush(msm_uport->dma_rx_channel, 0);
			pr_debug("%s(): waiting for flush completion.\n",
				 __func__);
			ret = wait_event_timeout(msm_uport->rx.wait,
				msm_uport->rx_discard_flush_issued == false,
				RX_FLUSH_COMPLETE_TIMEOUT);
			if (!ret)
				pr_err("%s(): Discard flush pending.\n",
				       __func__);
		}
	}

	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	mb();
	mutex_unlock(&msm_uport->clk_mutex);
}

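/*
 * Userspace view of the set_termios() path above, as a hedged sketch using
 * only standard termios calls. The device node name is an assumption; note
 * that a B200 request really selects 3.2 Mbps because of the remapping in
 * msm_hs_set_termios():
 *
 *	struct termios tio;
 *	int fd = open("/dev/ttyHS0", O_RDWR | O_NOCTTY);
 *	tcgetattr(fd, &tio);
 *	cfmakeraw(&tio);
 *	tio.c_cflag |= CRTSCTS;		(hardware flow control -> MR1 bits)
 *	cfsetispeed(&tio, B200);	(remapped to 3.2 Mbps by this driver)
 *	cfsetospeed(&tio, B200);
 *	tcsetattr(fd, TCSANOW, &tio);
 */
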
/*
 * Standard API, Transmitter
 * Any character in the transmit shift register is sent
 */
unsigned int msm_hs_tx_empty(struct uart_port *uport)
{
	unsigned int data;
	unsigned int ret = 0;

	data = msm_hs_read(uport, UARTDM_SR_ADDR);
	if (data & UARTDM_SR_TXEMT_BMSK)
		ret = TIOCSER_TEMT;

	return ret;
}
EXPORT_SYMBOL(msm_hs_tx_empty);

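/*
 * msm_hs_tx_empty() is exported for other kernel code (for example a
 * Bluetooth line-discipline driver) that wants to know whether the shifter
 * has drained before dropping clocks. A hedged sketch of a caller, with
 * hs_uport standing in for whatever struct uart_port the client holds:
 *
 *	if (msm_hs_tx_empty(hs_uport) == TIOCSER_TEMT)
 *		msm_hs_request_clock_off(hs_uport);
 */
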
/*
 * Standard API, Stop transmitter.
 * Any character in the transmit shift register is sent, as
 * is the current data mover transfer.
 */
static void msm_hs_stop_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	msm_uport->tx.tx_ready_int_en = 0;
}

/* Disconnect the BAM RX endpoint pipe index from workqueue context */
static void hsuart_disconnect_rx_endpoint_work(struct work_struct *w)
{
	struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
						disconnect_rx_endpoint);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	sps_disconnect(sps_pipe_handle);
	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	msm_uport->rx.flush = FLUSH_SHUTDOWN;
	wake_up(&msm_uport->rx.wait);
}

/*
 * Standard API, Stop receiver as soon as possible.
 *
 * Function immediately terminates the operation of the
 * channel receiver and any incoming characters are lost. None
 * of the receiver status bits are affected by this command and
 * characters that are already in the receive FIFO are retained.
 */
static void msm_hs_stop_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int data;

	/* disable dlink */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport))
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	else
		data &= ~UARTDM_RX_DM_EN_BMSK;
	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* calling DMOV or CLOCK API. Hence mb() */
	mb();
	/* Disable the receiver */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		if (is_blsp_uart(msm_uport)) {
			msm_uport->rx.flush = FLUSH_STOP;
			/* workqueue for BAM rx endpoint disconnect */
			queue_work(msm_uport->hsuart_wq,
				   &msm_uport->disconnect_rx_endpoint);
		} else {
			/* do discard flush */
			msm_dmov_flush(msm_uport->dma_rx_channel, 0);
		}
	}
	if (!is_blsp_uart(msm_uport) && msm_uport->rx.flush != FLUSH_SHUTDOWN)
		msm_uport->rx.flush = FLUSH_STOP;
}

/* Transmit the next chunk of data */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	int aligned_tx_count;
	dma_addr_t src_addr;
	dma_addr_t aligned_src_addr;
	u32 flags = SPS_IOVEC_FLAG_EOT;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
	struct sps_pipe *sps_pipe_handle;

	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		msm_hs_stop_tx_locked(uport);
		return;
	}

	tx->dma_in_flight = 1;

	tx_count = uart_circ_chars_pending(tx_buf);

	if (UARTDM_TX_BUF_SIZE < tx_count)
		tx_count = UARTDM_TX_BUF_SIZE;

	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	/* Mask src_addr so that it is cache aligned and
	 * add the masked-off bytes to tx_count */
	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
	aligned_tx_count = tx_count + src_addr - aligned_src_addr;

	dma_sync_single_for_device(uport->dev, aligned_src_addr,
				   aligned_tx_count, DMA_TO_DEVICE);

	if (is_blsp_uart(msm_uport)) {
		/* Issue TX BAM Start IFC command */
		msm_hs_write(uport, UARTDM_CR_ADDR, START_TX_BAM_IFC);
	} else {
		tx->command_ptr->num_rows =
				(((tx_count + 15) >> 4) << 16) |
				((tx_count + 15) >> 4);
		tx->command_ptr->src_row_addr = src_addr;

		dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr,
					   sizeof(dmov_box), DMA_TO_DEVICE);

		*tx->command_ptr_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
	}

	/* Save tx_count to use in Callback */
	tx->tx_count = tx_count;
	msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);

	/* Disable the tx_ready interrupt */
	msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	msm_uport->tx.flush = FLUSH_NONE;

	if (is_blsp_uart(msm_uport)) {
		sps_pipe_handle = tx->cons.pipe_handle;
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
				 msm_uport, flags);
	} else {
		dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
					   sizeof(u32), DMA_TO_DEVICE);

		msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
	}
}

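/*
 * Descriptive note on the DMOV path above (reading of the code, not a
 * documented format): the data mover moves 16-byte rows, which is why
 * (tx_count + 15) >> 4 is the row count rounded up to MSM_UARTDM_BURST_SIZE,
 * and the same value appears in both halves of num_rows because the box
 * command carries separate source and destination row counts.
 */
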
/* Start to receive the next chunk of data */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle;
	u32 flags = SPS_IOVEC_FLAG_EOT;
	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
	unsigned int data;

	msm_uport->rx.buffer_pending = 0;
	if (buffer_pending && hs_serial_debug_mask)
		printk(KERN_ERR "Error: rx started in buffer state = %x",
		       buffer_pending);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
	msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
	msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;

	/*
	 * Enable the UARTDM Rx interface as it has previously been
	 * disabled in set_termios before configuring the baud rate.
	 */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport)) {
		/* Enable UARTDM Rx BAM Interface */
		data |= UARTDM_RX_BAM_ENABLE_BMSK;
	} else {
		data |= UARTDM_RX_DM_EN_BMSK;
	}

	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	if (is_blsp_uart(msm_uport)) {
		/*
		 * RX-transfer will be automatically re-activated
		 * after last data of previous transfer was read.
		 */
		data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
			RX_DMRX_CYCLIC_EN);
		msm_hs_write(uport, UARTDM_RX_TRANS_CTRL_ADDR, data);
		/* Issue RX BAM Start IFC command */
		msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
		mb();
	}

	msm_uport->rx.flush = FLUSH_NONE;

	if (is_blsp_uart(msm_uport)) {
		sps_pipe_handle = rx->prod.pipe_handle;
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, rx->rbuffer,
				 UARTDM_RX_BUF_SIZE, msm_uport, flags);
	} else {
		msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel,
				     &msm_uport->rx.xfer);
	}
}

static void flip_insert_work(struct work_struct *work)
{
	unsigned long flags;
	int retval;
	struct msm_hs_port *msm_uport =
		container_of(work, struct msm_hs_port,
			     rx.flip_insert_work.work);
	struct tty_struct *tty = msm_uport->uport.state->port.tty;

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
		if (hs_serial_debug_mask)
			printk(KERN_ERR "Error: No buffer pending in %s",
			       __func__);
		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
		return;
	}
	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		if (retval)
			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
	}
	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
		retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
		if (retval)
			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
	}
	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
		int rx_count, rx_offset;
		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer +
						rx_offset, rx_count);
		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
						 PARITY_ERROR);
		if (retval != rx_count)
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 8 | (rx_count - retval) << 16;
	}
	if (msm_uport->rx.buffer_pending)
		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
				      msecs_to_jiffies(RETRY_TIMEOUT));
	else
		if ((msm_uport->clk_state == MSM_HS_CLK_ON) &&
		    (msm_uport->rx.flush <= FLUSH_IGNORE)) {
			if (hs_serial_debug_mask)
				printk(KERN_WARNING
				       "msm_serial_hs: "
				       "Pending buffers cleared. "
				       "Restarting\n");
			msm_hs_start_rx_locked(&msm_uport->uport);
		}
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
	tty_flip_buffer_push(tty);
}

static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr)
{
	int retval;
	int rx_count;
	unsigned long status;
	unsigned long flags;
	unsigned int error_f = 0;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;
	unsigned int flush;
	struct tty_struct *tty;
	struct sps_event_notify *notify;
	struct msm_hs_rx *rx;
	struct sps_pipe *sps_pipe_handle;
	u32 sps_flags = SPS_IOVEC_FLAG_EOT;

	msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
				 struct msm_hs_port, rx.tlet);
	uport = &msm_uport->uport;
	tty = uport->state->port.tty;
	notify = &msm_uport->notify;
	rx = &msm_uport->rx;

	status = msm_hs_read(uport, UARTDM_SR_ADDR);

	spin_lock_irqsave(&uport->lock, flags);

	if (!is_blsp_uart(msm_uport))
		msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);

	/* overflow is not connected to data in a FIFO */
	if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
		     (uport->read_status_mask & CREAD))) {
		retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
		if (!retval)
			msm_uport->rx.buffer_pending |= TTY_OVERRUN;
		uport->icount.buf_overrun++;
		error_f = 1;
	}

	if (!(uport->ignore_status_mask & INPCK))
		status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);

	if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
		/* Can not tell difference between parity & frame error */
		if (hs_serial_debug_mask)
			printk(KERN_WARNING "msm_serial_hs: parity error\n");
		uport->icount.parity++;
		error_f = 1;
		if (!(uport->ignore_status_mask & IGNPAR)) {
			retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_PARITY;
		}
	}

	if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
		if (hs_serial_debug_mask)
			printk(KERN_WARNING "msm_serial_hs: Rx break\n");
		uport->icount.brk++;
		error_f = 1;
		if (!(uport->ignore_status_mask & IGNBRK)) {
			retval = tty_insert_flip_char(tty, 0, TTY_BREAK);
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_BREAK;
		}
	}

	if (error_f)
		msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);

	if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
		msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
	flush = msm_uport->rx.flush;
	if (flush == FLUSH_IGNORE)
		if (!msm_uport->rx.buffer_pending)
			msm_hs_start_rx_locked(uport);

	if (flush == FLUSH_STOP) {
		msm_uport->rx.flush = FLUSH_SHUTDOWN;
		wake_up(&msm_uport->rx.wait);
	}
	if (flush >= FLUSH_DATA_INVALID)
		goto out;

	if (is_blsp_uart(msm_uport)) {
		rx_count = msm_uport->rx_count_callback;
	} else {
		rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
		/* order the read of rx.buffer */
		rmb();
	}

	if (0 != (uport->read_status_mask & CREAD)) {
		retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
						rx_count);
		if (retval != rx_count) {
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 5 | (rx_count - retval) << 16;
		}
	}

	/* order the read of rx.buffer and the start of next rx xfer */
	wmb();

	if (!msm_uport->rx.buffer_pending) {
		if (is_blsp_uart(msm_uport)) {
			msm_uport->rx.flush = FLUSH_NONE;
			sps_pipe_handle = rx->prod.pipe_handle;
			/* Queue transfer request to SPS */
			sps_transfer_one(sps_pipe_handle, rx->rbuffer,
					 UARTDM_RX_BUF_SIZE, msm_uport,
					 sps_flags);
		} else {
			msm_hs_start_rx_locked(uport);
		}
	}
out:
	if (msm_uport->rx.buffer_pending) {
		if (hs_serial_debug_mask)
			printk(KERN_WARNING
			       "msm_serial_hs: "
			       "tty buffer exhausted. "
			       "Stalling\n");
		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
				      msecs_to_jiffies(RETRY_TIMEOUT));
	}
	/* release wakelock in 500ms, not immediately, because higher layers
	 * don't always take wakelocks when they should */
	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
	spin_unlock_irqrestore(&uport->lock, flags);
	if (flush < FLUSH_DATA_INVALID)
		tty_flip_buffer_push(tty);
}

1396/* Enable the transmitter interrupt */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001397static void msm_hs_start_tx_locked(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301398{
1399 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1400
Mayank Rana55046232011-03-07 10:28:42 +05301401 if (msm_uport->tx.tx_ready_int_en == 0) {
1402 msm_uport->tx.tx_ready_int_en = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001403 if (msm_uport->tx.dma_in_flight == 0)
1404 msm_hs_submit_tx_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301405 }
Mayank Rana55046232011-03-07 10:28:42 +05301406}
1407
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301408/**
1409 * Callback notification from SPS driver
1410 *
1411 * This callback function is called by the SPS driver when the
1412 * requested SPS data transfer is
1413 * completed.
1414 *
1415 */
1416
1417static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
1418{
1419 struct msm_hs_port *msm_uport =
1420 (struct msm_hs_port *)
1421 ((struct sps_event_notify *)notify)->user;
1422
1423 msm_uport->notify = *notify;
1424 pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
1425 __func__, notify->event_id,
1426 notify->data.transfer.iovec.addr,
1427 notify->data.transfer.iovec.size,
1428 notify->data.transfer.iovec.flags);
1429
1430 tasklet_schedule(&msm_uport->tx.tlet);
1431}
1432
Mayank Rana55046232011-03-07 10:28:42 +05301433/*
1434 * This routine is called when we are done with a DMA transfer
1435 *
1436 * This routine is registered with Data mover when we set
1437 * up a Data Mover transfer. It is called from Data mover ISR
1438 * when the DMA transfer is done.
1439 */
1440static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr,
1441 unsigned int result,
1442 struct msm_dmov_errdata *err)
1443{
Mayank Rana55046232011-03-07 10:28:42 +05301444 struct msm_hs_port *msm_uport;
1445
Mayank Rana55046232011-03-07 10:28:42 +05301446 msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05301447 if (msm_uport->tx.flush == FLUSH_STOP)
1448		/* DMA FLUSH unsuccessful */
1449 WARN_ON(!(result & DMOV_RSLT_FLUSH));
1450 else
1451 /* DMA did not finish properly */
1452 WARN_ON(!(result & DMOV_RSLT_DONE));
Mayank Rana55046232011-03-07 10:28:42 +05301453
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001454 tasklet_schedule(&msm_uport->tx.tlet);
1455}
1456
1457static void msm_serial_hs_tx_tlet(unsigned long tlet_ptr)
1458{
1459 unsigned long flags;
1460 struct msm_hs_port *msm_uport = container_of((struct tasklet_struct *)
1461 tlet_ptr, struct msm_hs_port, tx.tlet);
1462
1463 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05301464 if (msm_uport->tx.flush == FLUSH_STOP) {
1465 msm_uport->tx.flush = FLUSH_SHUTDOWN;
1466 wake_up(&msm_uport->tx.wait);
1467 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
1468 return;
1469 }
Mayank Rana55046232011-03-07 10:28:42 +05301470
1471 msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001472 msm_hs_write(&(msm_uport->uport), UARTDM_IMR_ADDR, msm_uport->imr_reg);
1473	/* Calling clk API. Hence mb() is required. */
1474 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301475
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001476 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
Mayank Rana55046232011-03-07 10:28:42 +05301477}
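/*
 * Note: the TX tasklet only re-arms the TX_READY interrupt (or completes a
 * pending flush); the circular-buffer tail advance and submission of the
 * next transfer happen in msm_hs_isr() once TX_READY fires.
 */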
1478
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301479/**
1480 * Callback notification from SPS driver
1481 *
1482 * This callback function is called by the SPS driver when the
1483 * requested SPS data transfer is
1484 * completed.
1485 *
1486 */
1487
1488static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
1489{
1490
1491 struct msm_hs_port *msm_uport =
1492 (struct msm_hs_port *)
1493 ((struct sps_event_notify *)notify)->user;
Mayank Rana05396b22013-03-16 19:10:11 +05301494 struct uart_port *uport;
1495 unsigned long flags;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301496
Mayank Rana05396b22013-03-16 19:10:11 +05301497 uport = &(msm_uport->uport);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301498 msm_uport->notify = *notify;
1499 pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
1500 __func__, notify->event_id,
1501 notify->data.transfer.iovec.addr,
1502 notify->data.transfer.iovec.size,
1503 notify->data.transfer.iovec.flags);
1504
Mayank Rana05396b22013-03-16 19:10:11 +05301505 if (msm_uport->rx.flush == FLUSH_NONE) {
1506 spin_lock_irqsave(&uport->lock, flags);
1507 msm_uport->rx_count_callback = notify->data.transfer.iovec.size;
1508 spin_unlock_irqrestore(&uport->lock, flags);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301509 tasklet_schedule(&msm_uport->rx.tlet);
Mayank Rana05396b22013-03-16 19:10:11 +05301510 }
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301511}
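/*
 * RX completions are handed to the RX tasklet only while rx.flush is
 * FLUSH_NONE; the transferred byte count is stashed in rx_count_callback
 * and picked up by the tasklet in place of UARTDM_RX_TOTAL_SNAP_ADDR.
 */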
1512
Mayank Rana55046232011-03-07 10:28:42 +05301513/*
1514 * This routine is called when we are done with a DMA transfer or
1515 * a flush has been sent to the data mover driver.
1516 *
1517 * This routine is registered with Data mover when we set up a Data Mover
1518 * transfer. It is called from Data mover ISR when the DMA transfer is done.
1519 */
1520static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
1521 unsigned int result,
1522 struct msm_dmov_errdata *err)
1523{
Mayank Rana55046232011-03-07 10:28:42 +05301524 struct msm_hs_port *msm_uport;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301525 struct uart_port *uport;
1526 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301527
1528 msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer);
Mayank Rana9c8bda92013-02-28 11:58:04 +05301529 uport = &(msm_uport->uport);
1530
1531 pr_debug("%s(): called result:%x\n", __func__, result);
1532 if (!(result & DMOV_RSLT_ERROR)) {
1533 if (result & DMOV_RSLT_FLUSH) {
1534 if (msm_uport->rx_discard_flush_issued) {
1535 spin_lock_irqsave(&uport->lock, flags);
1536 msm_uport->rx_discard_flush_issued = false;
1537 spin_unlock_irqrestore(&uport->lock, flags);
1538 wake_up(&msm_uport->rx.wait);
1539 }
1540 }
1541 }
Mayank Rana55046232011-03-07 10:28:42 +05301542
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001543 tasklet_schedule(&msm_uport->rx.tlet);
Mayank Rana55046232011-03-07 10:28:42 +05301544}
1545
1546/*
1547 * Standard API, Current states of modem control inputs
1548 *
1549 * Since CTS can be handled entirely by HARDWARE we always
1550 * indicate clear to send and count on the TX FIFO to block when
1551 * it fills up.
1552 *
1553 * - TIOCM_DCD
1554 * - TIOCM_CTS
1555 * - TIOCM_DSR
1556 * - TIOCM_RI
1557 * DCD and DSR are unsupported and will read as high; RI will read as low.
1558 */
1559static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
1560{
1561 return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
1562}
1563
1564/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001565 * Standard API, Set or clear RFR_signal
1566 *
1567 * To set RFR high (indicate we are not ready for data), disable auto
1568 * ready-for-receiving and then drive RFR_N high. To set RFR low, simply
1569 * re-enable auto ready-for-receiving and the hardware will lower the RFR
1570 * signal when it is ready.
Mayank Rana55046232011-03-07 10:28:42 +05301571 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001572void msm_hs_set_mctrl_locked(struct uart_port *uport,
Mayank Rana55046232011-03-07 10:28:42 +05301573 unsigned int mctrl)
1574{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001575 unsigned int set_rts;
1576 unsigned int data;
Mayank Rana55046232011-03-07 10:28:42 +05301577
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001578 /* RTS is active low */
1579 set_rts = TIOCM_RTS & mctrl ? 0 : 1;
Mayank Rana55046232011-03-07 10:28:42 +05301580
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001581 data = msm_hs_read(uport, UARTDM_MR1_ADDR);
1582 if (set_rts) {
1583 /*disable auto ready-for-receiving */
1584 data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
1585 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
1586 /* set RFR_N to high */
1587 msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
1588 } else {
1589 /* Enable auto ready-for-receiving */
1590 data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
1591 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
1592 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001593 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301594}
1595
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001596void msm_hs_set_mctrl(struct uart_port *uport,
1597 unsigned int mctrl)
1598{
1599 unsigned long flags;
1600
1601 spin_lock_irqsave(&uport->lock, flags);
1602 msm_hs_set_mctrl_locked(uport, mctrl);
1603 spin_unlock_irqrestore(&uport->lock, flags);
1604}
1605EXPORT_SYMBOL(msm_hs_set_mctrl);
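/*
 * Illustrative use from a client of this exported API (hypothetical sketch,
 * not taken from this file): throttle the remote end by dropping RTS while a
 * client-side buffer drains, then restore automatic RFR handling:
 *
 *	msm_hs_set_mctrl(uport, 0);		- RFR_N driven high
 *	...drain client buffers...
 *	msm_hs_set_mctrl(uport, TIOCM_RTS);	- auto RFR re-enabled
 */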
1606
Mayank Rana55046232011-03-07 10:28:42 +05301607/* Standard API, Enable modem status (CTS) interrupt */
1608static void msm_hs_enable_ms_locked(struct uart_port *uport)
1609{
1610 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1611
Mayank Rana55046232011-03-07 10:28:42 +05301612 /* Enable DELTA_CTS Interrupt */
1613 msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
1614 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001615 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301616
1617}
1618
Saket Saurabhce394102012-10-29 19:51:28 +05301619static void msm_hs_flush_buffer(struct uart_port *uport)
1620{
1621 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1622
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301623 if (msm_uport->tx.dma_in_flight)
1624 msm_uport->tty_flush_receive = true;
Saket Saurabhce394102012-10-29 19:51:28 +05301625}
1626
Mayank Rana55046232011-03-07 10:28:42 +05301627/*
1628 * Standard API, Break Signal
1629 *
1630 * Control the transmission of a break signal: ctl == 0 terminates
1631 * the break signal, ctl != 0 starts it.
1632 */
1633static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
1634{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001635 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301636
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001637 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301638 msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001639 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301641}
1642
1643static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
1644{
1645 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001646 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301647
Mayank Rana55046232011-03-07 10:28:42 +05301648 if (cfg_flags & UART_CONFIG_TYPE) {
1649 uport->type = PORT_MSM;
1650 msm_hs_request_port(uport);
1651 }
Mayank Ranabbfd2692011-09-20 08:51:17 +05301652
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001653 if (is_gsbi_uart(msm_uport)) {
Mayank Rana00b6bff2011-08-17 08:33:42 +05301654 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301655 clk_prepare_enable(msm_uport->pclk);
1656 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001657 iowrite32(GSBI_PROTOCOL_UART, msm_uport->mapped_gsbi +
1658 GSBI_CONTROL_ADDR);
Mayank Ranacb589d82012-03-01 11:50:03 +05301659 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana00b6bff2011-08-17 08:33:42 +05301660 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301661 clk_disable_unprepare(msm_uport->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001662 }
Mayank Rana55046232011-03-07 10:28:42 +05301663}
1664
1665/* Handle CTS changes (Called from interrupt handler) */
Mayank Ranaee815f32011-12-08 09:06:09 +05301666static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301667{
Mayank Rana55046232011-03-07 10:28:42 +05301668 /* clear interrupt */
1669 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001670	/* Calling CLOCK API. Hence mb() is required here. */
1671 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301672 uport->icount.cts++;
1673
Mayank Rana55046232011-03-07 10:28:42 +05301674 /* clear the IOCTL TIOCMIWAIT if called */
1675 wake_up_interruptible(&uport->state->port.delta_msr_wait);
1676}
1677
1678/* check if the TX path is flushed, and if so clock off
1679 * returns 0 did not clock off, need to retry (still sending final byte)
1680 * -1 did not clock off, do not retry
1681 * 1 if we clocked off
1682 */
Mayank Ranacb589d82012-03-01 11:50:03 +05301683static int msm_hs_check_clock_off(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301684{
1685 unsigned long sr_status;
Mayank Ranacb589d82012-03-01 11:50:03 +05301686 unsigned long flags;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301687 int ret;
Mayank Rana55046232011-03-07 10:28:42 +05301688 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1689 struct circ_buf *tx_buf = &uport->state->xmit;
Mayank Rana05396b22013-03-16 19:10:11 +05301690 struct msm_hs_rx *rx = &msm_uport->rx;
1691 struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05301692
Mayank Ranacb589d82012-03-01 11:50:03 +05301693 mutex_lock(&msm_uport->clk_mutex);
1694 spin_lock_irqsave(&uport->lock, flags);
1695
Mayank Rana55046232011-03-07 10:28:42 +05301696 /* Cancel if tx tty buffer is not empty, dma is in flight,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001697 * or tx fifo is not empty */
Mayank Rana55046232011-03-07 10:28:42 +05301698 if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
1699 !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001700 msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) {
Mayank Ranacb589d82012-03-01 11:50:03 +05301701 spin_unlock_irqrestore(&uport->lock, flags);
1702 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301703 return -1;
1704 }
1705
1706 /* Make sure the uart is finished with the last byte */
1707 sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
Mayank Ranacb589d82012-03-01 11:50:03 +05301708 if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) {
1709 spin_unlock_irqrestore(&uport->lock, flags);
1710 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301711 return 0; /* retry */
Mayank Ranacb589d82012-03-01 11:50:03 +05301712 }
Mayank Rana55046232011-03-07 10:28:42 +05301713
1714 /* Make sure forced RXSTALE flush complete */
1715 switch (msm_uport->clk_req_off_state) {
1716 case CLK_REQ_OFF_START:
1717 msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
Mayank Rana05396b22013-03-16 19:10:11 +05301718
1719 if (!is_blsp_uart(msm_uport)) {
1720 msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
1721 /*
1722			 * Before returning, make sure the device writel has
1723			 * completed. Hence mb() is required here.
1724 */
1725 mb();
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301726 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301727 spin_unlock_irqrestore(&uport->lock, flags);
1728 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301729 return 0; /* RXSTALE flush not complete - retry */
1730 case CLK_REQ_OFF_RXSTALE_ISSUED:
1731 case CLK_REQ_OFF_FLUSH_ISSUED:
Mayank Ranacb589d82012-03-01 11:50:03 +05301732 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana05396b22013-03-16 19:10:11 +05301733 if (is_blsp_uart(msm_uport)) {
1734 msm_uport->clk_req_off_state =
1735 CLK_REQ_OFF_RXSTALE_FLUSHED;
1736 sps_disconnect(sps_pipe_handle);
1737 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301738 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301739 return 0; /* RXSTALE flush not complete - retry */
1740 case CLK_REQ_OFF_RXSTALE_FLUSHED:
1741 break; /* continue */
1742 }
1743
1744 if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
Mayank Rana9c8bda92013-02-28 11:58:04 +05301745 if (msm_uport->rx.flush == FLUSH_NONE) {
Mayank Rana55046232011-03-07 10:28:42 +05301746 msm_hs_stop_rx_locked(uport);
Saket Saurabh467614f2013-03-16 17:24:12 +05301747 if (!is_blsp_uart(msm_uport))
1748 msm_uport->rx_discard_flush_issued = true;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301749 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301750
1751 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana9c8bda92013-02-28 11:58:04 +05301752 if (msm_uport->rx_discard_flush_issued) {
1753			pr_debug("%s(): waiting for flush completion.\n",
1754 __func__);
1755 ret = wait_event_timeout(msm_uport->rx.wait,
1756 msm_uport->rx_discard_flush_issued == false,
1757 RX_FLUSH_COMPLETE_TIMEOUT);
1758 if (!ret)
1759 pr_err("%s(): Flush complete pending.\n",
1760 __func__);
1761 }
1762
Mayank Ranacb589d82012-03-01 11:50:03 +05301763 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301764 return 0; /* come back later to really clock off */
1765 }
1766
Mayank Rana55046232011-03-07 10:28:42 +05301767 spin_unlock_irqrestore(&uport->lock, flags);
1768
Mayank Rana55046232011-03-07 10:28:42 +05301769 /* we really want to clock off */
Mayank Ranacb589d82012-03-01 11:50:03 +05301770 clk_disable_unprepare(msm_uport->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001771 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301772 clk_disable_unprepare(msm_uport->pclk);
1773
Mayank Rana55046232011-03-07 10:28:42 +05301774 msm_uport->clk_state = MSM_HS_CLK_OFF;
Mayank Ranacb589d82012-03-01 11:50:03 +05301775
1776 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001777 if (use_low_power_wakeup(msm_uport)) {
1778 msm_uport->wakeup.ignore = 1;
1779 enable_irq(msm_uport->wakeup.irq);
Mayank Rana55046232011-03-07 10:28:42 +05301780 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001781 wake_unlock(&msm_uport->dma_wake_lock);
Mayank Ranacb589d82012-03-01 11:50:03 +05301782
1783 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana88d49142013-01-16 17:28:53 +05301784
1785 /* Reset PNOC Bus Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301786 msm_hs_bus_voting(msm_uport, BUS_RESET);
Mayank Ranacb589d82012-03-01 11:50:03 +05301787 mutex_unlock(&msm_uport->clk_mutex);
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301788
Mayank Rana55046232011-03-07 10:28:42 +05301789 return 1;
1790}
1791
Mayank Ranacb589d82012-03-01 11:50:03 +05301792static void hsuart_clock_off_work(struct work_struct *w)
1793{
1794 struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
1795 clock_off_w);
Mayank Rana55046232011-03-07 10:28:42 +05301796 struct uart_port *uport = &msm_uport->uport;
1797
Mayank Ranacb589d82012-03-01 11:50:03 +05301798 if (!msm_hs_check_clock_off(uport)) {
1799 hrtimer_start(&msm_uport->clk_off_timer,
1800 msm_uport->clk_off_delay,
1801 HRTIMER_MODE_REL);
Mayank Rana55046232011-03-07 10:28:42 +05301802 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301803}
Mayank Rana55046232011-03-07 10:28:42 +05301804
Mayank Ranacb589d82012-03-01 11:50:03 +05301805static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
1806{
1807 struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
1808 clk_off_timer);
Mayank Rana55046232011-03-07 10:28:42 +05301809
Mayank Ranacb589d82012-03-01 11:50:03 +05301810 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
1811 return HRTIMER_NORESTART;
Mayank Rana55046232011-03-07 10:28:42 +05301812}
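/*
 * Clock-off sequencing: the TXLEV handler in msm_hs_isr() queues clock_off_w;
 * clk_req_off_state then advances CLK_REQ_OFF_START -> RXSTALE_ISSUED ->
 * FLUSH_ISSUED -> RXSTALE_FLUSHED across msm_hs_check_clock_off(), the
 * RXSTALE ISR path and the RX tasklet. While msm_hs_check_clock_off()
 * returns 0, the hrtimer above re-queues the work until the port can
 * really be clocked off.
 */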
1813
1814static irqreturn_t msm_hs_isr(int irq, void *dev)
1815{
1816 unsigned long flags;
1817 unsigned long isr_status;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001818 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
Mayank Rana55046232011-03-07 10:28:42 +05301819 struct uart_port *uport = &msm_uport->uport;
1820 struct circ_buf *tx_buf = &uport->state->xmit;
1821 struct msm_hs_tx *tx = &msm_uport->tx;
1822 struct msm_hs_rx *rx = &msm_uport->rx;
1823
1824 spin_lock_irqsave(&uport->lock, flags);
1825
1826 isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);
1827
1828 /* Uart RX starting */
1829 if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830 wake_lock(&rx->wake_lock); /* hold wakelock while rx dma */
Mayank Rana55046232011-03-07 10:28:42 +05301831 msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
1832 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001833		/* Complete device write for IMR. Hence mb() is required. */
1834 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301835 }
1836 /* Stale rx interrupt */
1837 if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
1838 msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
1839 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001840 /*
1841		 * Complete device write before calling the DMOV API. Hence
1842		 * mb() is required here.
1843 */
1844 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301845
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301846 if (msm_uport->clk_req_off_state ==
Mayank Rana05396b22013-03-16 19:10:11 +05301847 CLK_REQ_OFF_RXSTALE_ISSUED)
Mayank Rana55046232011-03-07 10:28:42 +05301848 msm_uport->clk_req_off_state =
Mayank Rana05396b22013-03-16 19:10:11 +05301849 CLK_REQ_OFF_FLUSH_ISSUED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001850
Mayank Rana05396b22013-03-16 19:10:11 +05301851 if (!is_blsp_uart(msm_uport) && (rx->flush == FLUSH_NONE)) {
Mayank Rana55046232011-03-07 10:28:42 +05301852 rx->flush = FLUSH_DATA_READY;
Mayank Rana05396b22013-03-16 19:10:11 +05301853 msm_dmov_flush(msm_uport->dma_rx_channel, 1);
Mayank Rana55046232011-03-07 10:28:42 +05301854 }
1855 }
1856 /* tx ready interrupt */
1857 if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
1858 /* Clear TX Ready */
1859 msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);
1860
1861 if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
1862 msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
1863 msm_hs_write(uport, UARTDM_IMR_ADDR,
1864 msm_uport->imr_reg);
1865 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001866 /*
1867 * Complete both writes before starting new TX.
1868		 * Hence mb() is required here.
1869 */
1870 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301871 /* Complete DMA TX transactions and submit new transactions */
Saket Saurabhce394102012-10-29 19:51:28 +05301872
1873 /* Do not update tx_buf.tail if uart_flush_buffer already
1874 called in serial core */
1875 if (!msm_uport->tty_flush_receive)
1876 tx_buf->tail = (tx_buf->tail +
1877 tx->tx_count) & ~UART_XMIT_SIZE;
1878 else
1879 msm_uport->tty_flush_receive = false;
Mayank Rana55046232011-03-07 10:28:42 +05301880
1881 tx->dma_in_flight = 0;
1882
1883 uport->icount.tx += tx->tx_count;
1884 if (tx->tx_ready_int_en)
1885 msm_hs_submit_tx_locked(uport);
1886
1887 if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
1888 uart_write_wakeup(uport);
1889 }
1890 if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
1891 /* TX FIFO is empty */
1892 msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
1893 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001894 /*
1895 * Complete device write before starting clock_off request.
1896		 * Hence mb() is required here.
1897 */
1898 mb();
Mayank Ranacb589d82012-03-01 11:50:03 +05301899 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
Mayank Rana55046232011-03-07 10:28:42 +05301900 }
1901
1902 /* Change in CTS interrupt */
1903 if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
Mayank Ranaee815f32011-12-08 09:06:09 +05301904 msm_hs_handle_delta_cts_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301905
1906 spin_unlock_irqrestore(&uport->lock, flags);
1907
1908 return IRQ_HANDLED;
1909}
1910
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001911/* request to turn off uart clock once pending TX is flushed */
1912void msm_hs_request_clock_off(struct uart_port *uport) {
1913 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301914 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1915
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001916 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301917 if (msm_uport->clk_state == MSM_HS_CLK_ON) {
1918 msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
1919 msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
Mayank Rana55046232011-03-07 10:28:42 +05301920 msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
1921 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001922 /*
1923		 * Complete device write before returning.
1924		 * Hence mb() is required here.
1925 */
1926 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301927 }
Mayank Rana55046232011-03-07 10:28:42 +05301928 spin_unlock_irqrestore(&uport->lock, flags);
1929}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001930EXPORT_SYMBOL(msm_hs_request_clock_off);
Mayank Rana55046232011-03-07 10:28:42 +05301931
Mayank Ranacb589d82012-03-01 11:50:03 +05301932void msm_hs_request_clock_on(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301933{
1934 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Ranacb589d82012-03-01 11:50:03 +05301935 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301936 unsigned int data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001937 int ret = 0;
Mayank Rana55046232011-03-07 10:28:42 +05301938
Mayank Ranacb589d82012-03-01 11:50:03 +05301939 mutex_lock(&msm_uport->clk_mutex);
1940 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301941
1942 switch (msm_uport->clk_state) {
1943 case MSM_HS_CLK_OFF:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001944 wake_lock(&msm_uport->dma_wake_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001945 disable_irq_nosync(msm_uport->wakeup.irq);
Mayank Ranacb589d82012-03-01 11:50:03 +05301946 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana88d49142013-01-16 17:28:53 +05301947
1948 /* Vote for PNOC BUS Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301949 msm_hs_bus_voting(msm_uport, BUS_SCALING);
Mayank Rana88d49142013-01-16 17:28:53 +05301950
Mayank Ranacb589d82012-03-01 11:50:03 +05301951 ret = clk_prepare_enable(msm_uport->clk);
1952 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001953			dev_err(uport->dev, "Clock ON Failure "
Mayank Ranacb589d82012-03-01 11:50:03 +05301954 "For UART CLK Stalling HSUART\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001955 break;
1956 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301957
1958 if (msm_uport->pclk) {
1959 ret = clk_prepare_enable(msm_uport->pclk);
1960 if (unlikely(ret)) {
1961 clk_disable_unprepare(msm_uport->clk);
1962				dev_err(uport->dev, "Clock ON Failure "
1963 "For UART Pclk Stalling HSUART\n");
1964 break;
1965 }
1966 }
1967 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001968 /* else fall-through */
Mayank Rana55046232011-03-07 10:28:42 +05301969 case MSM_HS_CLK_REQUEST_OFF:
1970 if (msm_uport->rx.flush == FLUSH_STOP ||
1971 msm_uport->rx.flush == FLUSH_SHUTDOWN) {
1972 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
1973 data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301974 if (is_blsp_uart(msm_uport))
1975 data |= UARTDM_RX_BAM_ENABLE_BMSK;
1976 else
1977 data |= UARTDM_RX_DM_EN_BMSK;
Mayank Rana55046232011-03-07 10:28:42 +05301978 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001979 /* Complete above device write. Hence mb() here. */
1980 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301981 }
1982 hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
Mayank Rana05396b22013-03-16 19:10:11 +05301983 if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
1984 if (is_blsp_uart(msm_uport)) {
1985 spin_unlock_irqrestore(&uport->lock, flags);
1986 msm_hs_spsconnect_rx(uport);
1987 spin_lock_irqsave(&uport->lock, flags);
1988 }
Mayank Rana55046232011-03-07 10:28:42 +05301989 msm_hs_start_rx_locked(uport);
Mayank Rana05396b22013-03-16 19:10:11 +05301990 }
Mayank Rana55046232011-03-07 10:28:42 +05301991 if (msm_uport->rx.flush == FLUSH_STOP)
1992 msm_uport->rx.flush = FLUSH_IGNORE;
1993 msm_uport->clk_state = MSM_HS_CLK_ON;
1994 break;
1995 case MSM_HS_CLK_ON:
1996 break;
1997 case MSM_HS_CLK_PORT_OFF:
1998 break;
1999 }
Mayank Rana55046232011-03-07 10:28:42 +05302000
Mayank Rana55046232011-03-07 10:28:42 +05302001 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Ranacb589d82012-03-01 11:50:03 +05302002 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05302003}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002004EXPORT_SYMBOL(msm_hs_request_clock_on);
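/*
 * Typical client pattern (hypothetical sketch, e.g. a Bluetooth driver
 * layered on this port; not taken from this file):
 *
 *	msm_hs_request_clock_on(uport);		- before queueing TX data
 *	...tty/uart writes...
 *	msm_hs_request_clock_off(uport);	- once the link is idle; the
 *						  clocks gate only after the
 *						  TX path fully drains
 */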
Mayank Rana55046232011-03-07 10:28:42 +05302005
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002006static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
Mayank Rana55046232011-03-07 10:28:42 +05302007{
2008 unsigned int wakeup = 0;
2009 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002010 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
Mayank Rana55046232011-03-07 10:28:42 +05302011 struct uart_port *uport = &msm_uport->uport;
2012 struct tty_struct *tty = NULL;
2013
2014 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002015 if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
2016 /* ignore the first irq - it is a pending irq that occured
Mayank Rana55046232011-03-07 10:28:42 +05302017 * before enable_irq() */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002018 if (msm_uport->wakeup.ignore)
2019 msm_uport->wakeup.ignore = 0;
Mayank Rana55046232011-03-07 10:28:42 +05302020 else
2021 wakeup = 1;
2022 }
2023
2024 if (wakeup) {
2025 /* the uart was clocked off during an rx, wake up and
2026 * optionally inject char into tty rx */
Mayank Ranacb589d82012-03-01 11:50:03 +05302027 spin_unlock_irqrestore(&uport->lock, flags);
2028 msm_hs_request_clock_on(uport);
2029 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002030 if (msm_uport->wakeup.inject_rx) {
Mayank Rana55046232011-03-07 10:28:42 +05302031 tty = uport->state->port.tty;
2032 tty_insert_flip_char(tty,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002033 msm_uport->wakeup.rx_to_inject,
Mayank Rana55046232011-03-07 10:28:42 +05302034 TTY_NORMAL);
Mayank Rana55046232011-03-07 10:28:42 +05302035 }
2036 }
2037
2038 spin_unlock_irqrestore(&uport->lock, flags);
2039
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002040 if (wakeup && msm_uport->wakeup.inject_rx)
2041 tty_flip_buffer_push(tty);
Mayank Rana55046232011-03-07 10:28:42 +05302042 return IRQ_HANDLED;
2043}
2044
2045static const char *msm_hs_type(struct uart_port *port)
2046{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002047 return ("MSM HS UART");
Mayank Rana55046232011-03-07 10:28:42 +05302048}
2049
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302050/**
2051 * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
2052 * @uport: uart port
2053 */
2054static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
2055{
2056 struct platform_device *pdev = to_platform_device(uport->dev);
2057 const struct msm_serial_hs_platform_data *pdata =
2058 pdev->dev.platform_data;
2059
2060 if (pdata) {
2061 if (gpio_is_valid(pdata->uart_tx_gpio))
2062 gpio_free(pdata->uart_tx_gpio);
2063 if (gpio_is_valid(pdata->uart_rx_gpio))
2064 gpio_free(pdata->uart_rx_gpio);
2065 if (gpio_is_valid(pdata->uart_cts_gpio))
2066 gpio_free(pdata->uart_cts_gpio);
2067 if (gpio_is_valid(pdata->uart_rfr_gpio))
2068 gpio_free(pdata->uart_rfr_gpio);
2069 } else {
2070 pr_err("Error:Pdata is NULL.\n");
2071 }
2072}
2073
2074/**
2075 * msm_hs_config_uart_gpios - Configures UART GPIOs
2076 * @uport: uart port
2077 */
2078static int msm_hs_config_uart_gpios(struct uart_port *uport)
2079{
2080 struct platform_device *pdev = to_platform_device(uport->dev);
2081 const struct msm_serial_hs_platform_data *pdata =
2082 pdev->dev.platform_data;
2083 int ret = 0;
2084
2085 if (pdata) {
2086 if (gpio_is_valid(pdata->uart_tx_gpio)) {
2087 ret = gpio_request(pdata->uart_tx_gpio,
2088 "UART_TX_GPIO");
2089 if (unlikely(ret)) {
2090 pr_err("gpio request failed for:%d\n",
2091 pdata->uart_tx_gpio);
2092 goto exit_uart_config;
2093 }
2094 }
2095
2096 if (gpio_is_valid(pdata->uart_rx_gpio)) {
2097 ret = gpio_request(pdata->uart_rx_gpio,
2098 "UART_RX_GPIO");
2099 if (unlikely(ret)) {
2100 pr_err("gpio request failed for:%d\n",
2101 pdata->uart_rx_gpio);
2102 goto uart_tx_unconfig;
2103 }
2104 }
2105
2106 if (gpio_is_valid(pdata->uart_cts_gpio)) {
2107 ret = gpio_request(pdata->uart_cts_gpio,
2108 "UART_CTS_GPIO");
2109 if (unlikely(ret)) {
2110 pr_err("gpio request failed for:%d\n",
2111 pdata->uart_cts_gpio);
2112 goto uart_rx_unconfig;
2113 }
2114 }
2115
2116 if (gpio_is_valid(pdata->uart_rfr_gpio)) {
2117 ret = gpio_request(pdata->uart_rfr_gpio,
2118 "UART_RFR_GPIO");
2119 if (unlikely(ret)) {
2120 pr_err("gpio request failed for:%d\n",
2121 pdata->uart_rfr_gpio);
2122 goto uart_cts_unconfig;
2123 }
2124 }
2125 } else {
2126 pr_err("Pdata is NULL.\n");
2127 ret = -EINVAL;
2128 }
2129 return ret;
2130
2131uart_cts_unconfig:
2132 if (gpio_is_valid(pdata->uart_cts_gpio))
2133 gpio_free(pdata->uart_cts_gpio);
2134uart_rx_unconfig:
2135 if (gpio_is_valid(pdata->uart_rx_gpio))
2136 gpio_free(pdata->uart_rx_gpio);
2137uart_tx_unconfig:
2138 if (gpio_is_valid(pdata->uart_tx_gpio))
2139 gpio_free(pdata->uart_tx_gpio);
2140exit_uart_config:
2141 return ret;
2142}
2143
Mayank Rana55046232011-03-07 10:28:42 +05302144/* Called when port is opened */
2145static int msm_hs_startup(struct uart_port *uport)
2146{
2147 int ret;
2148 int rfr_level;
2149 unsigned long flags;
2150 unsigned int data;
2151 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana40836782012-11-16 14:45:47 +05302152 struct platform_device *pdev = to_platform_device(uport->dev);
2153 const struct msm_serial_hs_platform_data *pdata =
2154 pdev->dev.platform_data;
Mayank Rana55046232011-03-07 10:28:42 +05302155 struct circ_buf *tx_buf = &uport->state->xmit;
2156 struct msm_hs_tx *tx = &msm_uport->tx;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302157 struct msm_hs_rx *rx = &msm_uport->rx;
2158 struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
2159 struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05302160
2161 rfr_level = uport->fifosize;
2162 if (rfr_level > 16)
2163 rfr_level -= 16;
2164
2165 tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
2166 DMA_TO_DEVICE);
2167
Mayank Rana679436e2012-03-31 05:41:14 +05302168 wake_lock(&msm_uport->dma_wake_lock);
Mayank Rana55046232011-03-07 10:28:42 +05302169 /* turn on uart clk */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002170 ret = msm_hs_init_clk(uport);
Mayank Rana55046232011-03-07 10:28:42 +05302171 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302172 pr_err("Turning ON uartclk error\n");
2173 wake_unlock(&msm_uport->dma_wake_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002174 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05302175 }
2176
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302177 if (is_blsp_uart(msm_uport)) {
2178 ret = msm_hs_config_uart_gpios(uport);
2179 if (ret) {
2180 pr_err("Uart GPIO request failed\n");
2181 goto deinit_uart_clk;
2182 }
2183 } else {
2184 if (pdata && pdata->gpio_config)
2185 if (unlikely(pdata->gpio_config(1)))
2186 dev_err(uport->dev, "Cannot configure gpios\n");
2187 }
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302188
2189 /* SPS Connect for BAM endpoints */
2190 if (is_blsp_uart(msm_uport)) {
2191 /* SPS connect for TX */
2192 ret = msm_hs_spsconnect_tx(uport);
2193 if (ret) {
2194 pr_err("msm_serial_hs: SPS connect failed for TX");
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302195 goto unconfig_uart_gpios;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302196 }
2197
2198 /* SPS connect for RX */
2199 ret = msm_hs_spsconnect_rx(uport);
2200 if (ret) {
2201 pr_err("msm_serial_hs: SPS connect failed for RX");
2202 goto sps_disconnect_tx;
2203 }
2204 }
2205
Mayank Rana55046232011-03-07 10:28:42 +05302206 /* Set auto RFR Level */
2207 data = msm_hs_read(uport, UARTDM_MR1_ADDR);
2208 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
2209 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
2210 data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
2211 data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
2212 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
2213
2214 /* Make sure RXSTALE count is non-zero */
2215 data = msm_hs_read(uport, UARTDM_IPR_ADDR);
2216 if (!data) {
2217 data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
2218 msm_hs_write(uport, UARTDM_IPR_ADDR, data);
2219 }
2220
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302221 if (is_blsp_uart(msm_uport)) {
2222 /* Enable BAM mode */
2223 data = UARTDM_TX_BAM_ENABLE_BMSK | UARTDM_RX_BAM_ENABLE_BMSK;
2224 } else {
2225 /* Enable Data Mover Mode */
2226 data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
2227 }
Mayank Rana55046232011-03-07 10:28:42 +05302228 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
2229
2230 /* Reset TX */
2231 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
2232 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
2233 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
2234 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
2235 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
2236 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
2237 msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
2238 /* Turn on Uart Receiver */
2239 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);
2240
2241 /* Turn on Uart Transmitter */
2242 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);
2243
2244 /* Initialize the tx */
2245 tx->tx_ready_int_en = 0;
2246 tx->dma_in_flight = 0;
2247
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302248 if (!is_blsp_uart(msm_uport)) {
2249 tx->xfer.complete_func = msm_hs_dmov_tx_callback;
Mayank Rana55046232011-03-07 10:28:42 +05302250
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302251 tx->command_ptr->cmd = CMD_LC |
2252 CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;
Mayank Rana55046232011-03-07 10:28:42 +05302253
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302254 tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
Mayank Rana55046232011-03-07 10:28:42 +05302255 | (MSM_UARTDM_BURST_SIZE);
2256
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302257 tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16);
Mayank Rana55046232011-03-07 10:28:42 +05302258
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302259 tx->command_ptr->dst_row_addr =
2260 msm_uport->uport.mapbase + UARTDM_TF_ADDR;
Mayank Rana05396b22013-03-16 19:10:11 +05302261
2262 msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302263 }
Mayank Rana55046232011-03-07 10:28:42 +05302264
Mayank Rana55046232011-03-07 10:28:42 +05302265 /* Enable reading the current CTS, no harm even if CTS is ignored */
2266 msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
2267
2268 msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002269 /*
2270 * Complete all device write related configuration before
2271	 * queuing the RX request. Hence mb() is required here.
2272 */
2273 mb();
Mayank Rana55046232011-03-07 10:28:42 +05302274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002275 if (use_low_power_wakeup(msm_uport)) {
2276 ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
Mayank Rana679436e2012-03-31 05:41:14 +05302277 if (unlikely(ret)) {
2278 pr_err("%s():Err setting wakeup irq\n", __func__);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302279 goto sps_disconnect_rx;
Mayank Rana679436e2012-03-31 05:41:14 +05302280 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002281 }
Mayank Rana55046232011-03-07 10:28:42 +05302282
2283 ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
2284 "msm_hs_uart", msm_uport);
2285 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302286 pr_err("%s():Error getting uart irq\n", __func__);
2287 goto free_wake_irq;
Mayank Rana55046232011-03-07 10:28:42 +05302288 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002289 if (use_low_power_wakeup(msm_uport)) {
Mayank Ranacb589d82012-03-01 11:50:03 +05302290
2291 ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
2292 msm_hs_wakeup_isr,
2293 IRQF_TRIGGER_FALLING,
2294 "msm_hs_wakeup", msm_uport);
2295
Mayank Rana55046232011-03-07 10:28:42 +05302296 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302297 pr_err("%s():Err getting uart wakeup_irq\n", __func__);
2298 goto free_uart_irq;
Mayank Rana55046232011-03-07 10:28:42 +05302299 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002300 disable_irq(msm_uport->wakeup.irq);
Mayank Rana55046232011-03-07 10:28:42 +05302301 }
2302
Mayank Rana88d49142013-01-16 17:28:53 +05302303 /* Vote for PNOC BUS Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302304 msm_hs_bus_voting(msm_uport, BUS_SCALING);
Mayank Rana88d49142013-01-16 17:28:53 +05302305
Mayank Rana55046232011-03-07 10:28:42 +05302306 spin_lock_irqsave(&uport->lock, flags);
2307
Mayank Rana55046232011-03-07 10:28:42 +05302308 msm_hs_start_rx_locked(uport);
2309
2310 spin_unlock_irqrestore(&uport->lock, flags);
2311 ret = pm_runtime_set_active(uport->dev);
2312 if (ret)
2313 dev_err(uport->dev, "set active error:%d\n", ret);
2314 pm_runtime_enable(uport->dev);
2315
2316 return 0;
2317
Mayank Rana679436e2012-03-31 05:41:14 +05302318free_uart_irq:
2319 free_irq(uport->irq, msm_uport);
2320free_wake_irq:
2321 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302322sps_disconnect_rx:
2323 if (is_blsp_uart(msm_uport))
2324 sps_disconnect(sps_pipe_handle_rx);
2325sps_disconnect_tx:
2326 if (is_blsp_uart(msm_uport))
2327 sps_disconnect(sps_pipe_handle_tx);
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302328unconfig_uart_gpios:
2329 if (is_blsp_uart(msm_uport))
2330 msm_hs_unconfig_uart_gpios(uport);
Mayank Rana679436e2012-03-31 05:41:14 +05302331deinit_uart_clk:
Mayank Ranacb589d82012-03-01 11:50:03 +05302332 clk_disable_unprepare(msm_uport->clk);
Mayank Rana679436e2012-03-31 05:41:14 +05302333 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05302334 clk_disable_unprepare(msm_uport->pclk);
Mayank Rana679436e2012-03-31 05:41:14 +05302335 wake_unlock(&msm_uport->dma_wake_lock);
2336
Mayank Rana55046232011-03-07 10:28:42 +05302337 return ret;
2338}
2339
2340/* Initialize tx and rx data structures */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002341static int uartdm_init_port(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05302342{
2343 int ret = 0;
2344 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2345 struct msm_hs_tx *tx = &msm_uport->tx;
2346 struct msm_hs_rx *rx = &msm_uport->rx;
2347
Mayank Rana55046232011-03-07 10:28:42 +05302348 init_waitqueue_head(&rx->wait);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302349 init_waitqueue_head(&tx->wait);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002350 wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx");
2351 wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND,
2352 "msm_serial_hs_dma");
2353
2354 tasklet_init(&rx->tlet, msm_serial_hs_rx_tlet,
2355 (unsigned long) &rx->tlet);
2356 tasklet_init(&tx->tlet, msm_serial_hs_tx_tlet,
2357 (unsigned long) &tx->tlet);
Mayank Rana55046232011-03-07 10:28:42 +05302358
2359 rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
2360 UARTDM_RX_BUF_SIZE, 16, 0);
2361 if (!rx->pool) {
2362 pr_err("%s(): cannot allocate rx_buffer_pool", __func__);
2363 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002364 goto exit_tasket_init;
Mayank Rana55046232011-03-07 10:28:42 +05302365 }
2366
2367 rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
2368 if (!rx->buffer) {
2369 pr_err("%s(): cannot allocate rx->buffer", __func__);
2370 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002371 goto free_pool;
Mayank Rana55046232011-03-07 10:28:42 +05302372 }
2373
Mayank Ranaff398d02012-12-18 10:22:50 +05302374 /* Set up Uart Receive */
Mayank Rana05396b22013-03-16 19:10:11 +05302375 if (is_blsp_uart(msm_uport))
2376 msm_hs_write(uport, UARTDM_RFWR_ADDR, 32);
2377 else
2378 msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
Mayank Ranaff398d02012-12-18 10:22:50 +05302379
2380 INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
2381
2382 if (is_blsp_uart(msm_uport))
2383 return ret;
2384
2385 /* Allocate the command pointer. Needs to be 64 bit aligned */
2386 tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
2387 if (!tx->command_ptr) {
2388		ret = -ENOMEM;
2389 goto free_rx_buffer;
2390 }
2391
2392 tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
2393 if (!tx->command_ptr_ptr) {
2394 ret = -ENOMEM;
2395 goto free_tx_command_ptr;
2396 }
2397
2398 tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
2399 sizeof(dmov_box), DMA_TO_DEVICE);
2400 tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
2401 tx->command_ptr_ptr,
2402 sizeof(u32), DMA_TO_DEVICE);
2403 tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
2404
Mayank Rana55046232011-03-07 10:28:42 +05302405 /* Allocate the command pointer. Needs to be 64 bit aligned */
2406 rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
2407 if (!rx->command_ptr) {
2408 pr_err("%s(): cannot allocate rx->command_ptr", __func__);
2409 ret = -ENOMEM;
Mayank Ranaff398d02012-12-18 10:22:50 +05302410 goto free_tx_command_ptr_ptr;
Mayank Rana55046232011-03-07 10:28:42 +05302411 }
2412
Mayank Rana8431de82011-12-08 09:06:08 +05302413 rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
Mayank Rana55046232011-03-07 10:28:42 +05302414 if (!rx->command_ptr_ptr) {
2415 pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
2416 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002417 goto free_rx_command_ptr;
Mayank Rana55046232011-03-07 10:28:42 +05302418 }
2419
2420 rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
2421 (UARTDM_RX_BUF_SIZE >> 4);
2422
2423 rx->command_ptr->dst_row_addr = rx->rbuffer;
2424
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002425 rx->xfer.complete_func = msm_hs_dmov_rx_callback;
2426
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002427 rx->command_ptr->cmd = CMD_LC |
2428 CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;
2429
2430 rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
2431 | (MSM_UARTDM_BURST_SIZE);
2432 rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE;
2433 rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;
2434
Mayank Rana55046232011-03-07 10:28:42 +05302435 rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
2436 sizeof(dmov_box), DMA_TO_DEVICE);
2437
2438 *rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
2439
2440 rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
Mayank Rana8431de82011-12-08 09:06:08 +05302441 sizeof(u32), DMA_TO_DEVICE);
Mayank Rana55046232011-03-07 10:28:42 +05302442 rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
2443
Mayank Rana55046232011-03-07 10:28:42 +05302444 return ret;
2445
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002446free_rx_command_ptr:
Mayank Rana55046232011-03-07 10:28:42 +05302447 kfree(rx->command_ptr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002448
Mayank Ranaff398d02012-12-18 10:22:50 +05302449free_tx_command_ptr_ptr:
2450 kfree(msm_uport->tx.command_ptr_ptr);
2451 dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
2452 sizeof(u32), DMA_TO_DEVICE);
2453 dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
2454 sizeof(dmov_box), DMA_TO_DEVICE);
2455
2456free_tx_command_ptr:
2457 kfree(msm_uport->tx.command_ptr);
2458
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002459free_rx_buffer:
Mayank Rana55046232011-03-07 10:28:42 +05302460 dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002461 msm_uport->rx.rbuffer);
2462
2463free_pool:
Mayank Rana55046232011-03-07 10:28:42 +05302464 dma_pool_destroy(msm_uport->rx.pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002465
2466exit_tasket_init:
2467 wake_lock_destroy(&msm_uport->rx.wake_lock);
2468 wake_lock_destroy(&msm_uport->dma_wake_lock);
2469 tasklet_kill(&msm_uport->tx.tlet);
2470 tasklet_kill(&msm_uport->rx.tlet);
Mayank Rana55046232011-03-07 10:28:42 +05302471 return ret;
2472}
2473
Mayank Ranaff398d02012-12-18 10:22:50 +05302474struct msm_serial_hs_platform_data
2475 *msm_hs_dt_to_pdata(struct platform_device *pdev)
2476{
2477 struct device_node *node = pdev->dev.of_node;
2478 struct msm_serial_hs_platform_data *pdata;
2479 int rx_to_inject, ret;
2480
2481 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2482 if (!pdata) {
2483 pr_err("unable to allocate memory for platform data\n");
2484 return ERR_PTR(-ENOMEM);
2485 }
2486
2487 /* UART TX GPIO */
2488 pdata->uart_tx_gpio = of_get_named_gpio(node,
2489 "qcom,tx-gpio", 0);
2490 if (pdata->uart_tx_gpio < 0)
2491 pr_debug("uart_tx_gpio is not available\n");
2492
2493 /* UART RX GPIO */
2494 pdata->uart_rx_gpio = of_get_named_gpio(node,
2495 "qcom,rx-gpio", 0);
2496 if (pdata->uart_rx_gpio < 0)
2497 pr_debug("uart_rx_gpio is not available\n");
2498
2499 /* UART CTS GPIO */
2500 pdata->uart_cts_gpio = of_get_named_gpio(node,
2501 "qcom,cts-gpio", 0);
2502 if (pdata->uart_cts_gpio < 0)
2503 pr_debug("uart_cts_gpio is not available\n");
2504
2505 /* UART RFR GPIO */
2506 pdata->uart_rfr_gpio = of_get_named_gpio(node,
2507 "qcom,rfr-gpio", 0);
2508 if (pdata->uart_rfr_gpio < 0)
2509 pr_debug("uart_rfr_gpio is not available\n");
2510
2511 pdata->inject_rx_on_wakeup = of_property_read_bool(node,
2512 "qcom,inject-rx-on-wakeup");
2513
2514 if (pdata->inject_rx_on_wakeup) {
2515 ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
2516 &rx_to_inject);
2517 if (ret < 0) {
2518 pr_err("Error: Rx_char_to_inject not specified.\n");
2519 return ERR_PTR(ret);
2520 }
2521 pdata->rx_to_inject = (char)rx_to_inject;
2522 }
2523
2524 ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
2525 &pdata->bam_tx_ep_pipe_index);
2526 if (ret < 0) {
2527 pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
2528 return ERR_PTR(ret);
2529 }
2530
2531 if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
2532 pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
2533 pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
2534 return ERR_PTR(-EINVAL);
2535 }
2536
2537 ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
2538 &pdata->bam_rx_ep_pipe_index);
2539 if (ret < 0) {
2540 pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
2541 return ERR_PTR(ret);
2542 }
2543
2544 if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
2545 pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
2546 pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
2547 return ERR_PTR(-EINVAL);
2548 }
2549
2550 pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
2551		"tx_gpio:%d rx_gpio:%d cts_gpio:%d rfr_gpio:%d",
2552 pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
2553 pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
2554 pdata->uart_rfr_gpio);
2555
2556 return pdata;
2557}
2558
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302559
2560/**
2561 * Deallocate UART peripheral's SPS endpoint
2562 * @msm_uport - Pointer to msm_hs_port structure
2563 * @ep - Pointer to sps endpoint data structure
2564 */
2565
2566static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
2567 struct msm_hs_sps_ep_conn_data *ep)
2568{
2569 struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
2570 struct sps_connect *sps_config = &ep->config;
2571
2572 dma_free_coherent(msm_uport->uport.dev,
2573 sps_config->desc.size,
2574 &sps_config->desc.phys_base,
2575 GFP_KERNEL);
2576 sps_free_endpoint(sps_pipe_handle);
2577}
2578
2579
2580/**
2581 * Allocate UART peripheral's SPS endpoint
2582 *
2583 * This function allocates endpoint context
2584 * by calling appropriate SPS driver APIs.
2585 *
2586 * @msm_uport - Pointer to msm_hs_port structure
2587 * @ep - Pointer to sps endpoint data structure
2588 * @is_producer - 1 means Producer endpoint
2589 * - 0 means Consumer endpoint
2590 *
2591 * @return - 0 if successful else negative value
2592 */
2593
2594static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
2595 struct msm_hs_sps_ep_conn_data *ep,
2596 bool is_producer)
2597{
2598 int rc = 0;
2599 struct sps_pipe *sps_pipe_handle;
2600 struct sps_connect *sps_config = &ep->config;
2601 struct sps_register_event *sps_event = &ep->event;
2602
2603 /* Allocate endpoint context */
2604 sps_pipe_handle = sps_alloc_endpoint();
2605 if (!sps_pipe_handle) {
2606 pr_err("msm_serial_hs: sps_alloc_endpoint() failed!!\n"
2607 "is_producer=%d", is_producer);
2608 rc = -ENOMEM;
2609 goto out;
2610 }
2611
2612 /* Get default connection configuration for an endpoint */
2613 rc = sps_get_config(sps_pipe_handle, sps_config);
2614 if (rc) {
2615 pr_err("msm_serial_hs: sps_get_config() failed!!\n"
2616 "pipe_handle=0x%x rc=%d", (u32)sps_pipe_handle, rc);
2617 goto get_config_err;
2618 }
2619
2620 /* Modify the default connection configuration */
2621 if (is_producer) {
2622 /* For UART producer transfer, source is UART peripheral
2623		   whereas destination is system memory */
2624 sps_config->source = msm_uport->bam_handle;
2625 sps_config->destination = SPS_DEV_HANDLE_MEM;
2626 sps_config->mode = SPS_MODE_SRC;
2627 sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
2628 sps_config->dest_pipe_index = 0;
2629 sps_config->options = SPS_O_EOT;
2630 } else {
2631 /* For UART consumer transfer, source is system memory
2632		   whereas destination is UART peripheral */
2633 sps_config->source = SPS_DEV_HANDLE_MEM;
2634 sps_config->destination = msm_uport->bam_handle;
2635 sps_config->mode = SPS_MODE_DEST;
2636 sps_config->src_pipe_index = 0;
2637 sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
2638 sps_config->options = SPS_O_EOT;
2639 }
2640
2641 sps_config->event_thresh = 0x10;
2642
2643 /* Allocate maximum descriptor fifo size */
2644 sps_config->desc.size = 65532;
2645 sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
2646 sps_config->desc.size,
2647 &sps_config->desc.phys_base,
2648 GFP_KERNEL);
2649 if (!sps_config->desc.base) {
2650 rc = -ENOMEM;
2651 pr_err("msm_serial_hs: dma_alloc_coherent() failed!!\n");
2652 goto get_config_err;
2653 }
2654 memset(sps_config->desc.base, 0x00, sps_config->desc.size);
2655
2656 sps_event->mode = SPS_TRIGGER_CALLBACK;
2657 sps_event->options = SPS_O_EOT;
2658 if (is_producer)
2659 sps_event->callback = msm_hs_sps_rx_callback;
2660 else
2661 sps_event->callback = msm_hs_sps_tx_callback;
2662
2663 sps_event->user = (void *)msm_uport;
2664
2665 /* Now save the sps pipe handle */
2666 ep->pipe_handle = sps_pipe_handle;
2667 pr_debug("msm_serial_hs: success !! %s: pipe_handle=0x%x\n"
2668 "desc_fifo.phys_base=0x%x\n",
2669 is_producer ? "READ" : "WRITE",
2670 (u32)sps_pipe_handle, sps_config->desc.phys_base);
2671 return 0;
2672
2673get_config_err:
2674 sps_free_endpoint(sps_pipe_handle);
2675out:
2676 return rc;
2677}
2678
2679/**
2680 * Initialize SPS HW connected with UART core
2681 *
2682 * This function register BAM HW resources with
2683 * This function registers BAM HW resources with the
2684 * SPS driver and then initializes 2 SPS endpoints
2685 * msm_uport - Pointer to msm_hs_port structure
2686 *
2687 * @return - 0 if successful else negative value
2688 */
2689
2690static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
2691{
2692 int rc = 0;
2693 struct sps_bam_props bam = {0};
2694 u32 bam_handle;
2695
2696 rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
2697 if (rc || !bam_handle) {
2698 bam.phys_addr = msm_uport->bam_mem;
2699 bam.virt_addr = msm_uport->bam_base;
2700 /*
2701		 * This event threshold value is only significant for BAM-to-BAM
2702 * transfer. It's ignored for BAM-to-System mode transfer.
2703 */
2704 bam.event_threshold = 0x10; /* Pipe event threshold */
2705 bam.summing_threshold = 1; /* BAM event threshold */
2706
2707		/* SPS driver will handle the UART BAM IRQ */
2708 bam.irq = (u32)msm_uport->bam_irq;
2709 bam.manage = SPS_BAM_MGR_LOCAL;
2710
2711 pr_debug("msm_serial_hs: bam physical base=0x%x\n",
2712 (u32)bam.phys_addr);
2713 pr_debug("msm_serial_hs: bam virtual base=0x%x\n",
2714 (u32)bam.virt_addr);
2715
2716 /* Register UART Peripheral BAM device to SPS driver */
2717 rc = sps_register_bam_device(&bam, &bam_handle);
2718 if (rc) {
2719 pr_err("msm_serial_hs: BAM device register failed\n");
2720 return rc;
2721 }
2722 pr_info("msm_serial_hs: BAM device registered. bam_handle=0x%x",
2723			bam_handle);
2724 }
2725 msm_uport->bam_handle = bam_handle;
2726
2727 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
2728 UART_SPS_PROD_PERIPHERAL);
2729 if (rc) {
2730		pr_err("%s: Failed to Init Producer BAM-pipe\n", __func__);
2731 goto deregister_bam;
2732 }
2733
2734 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
2735 UART_SPS_CONS_PERIPHERAL);
2736 if (rc) {
2737		pr_err("%s: Failed to Init Consumer BAM-pipe\n", __func__);
2738 goto deinit_ep_conn_prod;
2739 }
2740 return 0;
2741
2742deinit_ep_conn_prod:
2743 msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
2744deregister_bam:
2745 sps_deregister_bam_device(msm_uport->bam_handle);
2746 return rc;
2747}
2748
Saket Saurabh10e88b32013-02-04 15:26:34 +05302749#define BLSP_UART_NR 12
2750static int deviceid[BLSP_UART_NR] = {0};
2751static atomic_t msm_serial_hs_next_id = ATOMIC_INIT(0);
2752
Mayank Rana55046232011-03-07 10:28:42 +05302753static int __devinit msm_hs_probe(struct platform_device *pdev)
2754{
Saket Saurabh10e88b32013-02-04 15:26:34 +05302755 int ret = 0, alias_num = -1;
Mayank Rana55046232011-03-07 10:28:42 +05302756 struct uart_port *uport;
2757 struct msm_hs_port *msm_uport;
Mayank Ranaff398d02012-12-18 10:22:50 +05302758 struct resource *core_resource;
2759 struct resource *bam_resource;
Mayank Rana55046232011-03-07 10:28:42 +05302760 struct resource *resource;
Mayank Ranaff398d02012-12-18 10:22:50 +05302761 int core_irqres, bam_irqres;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002762 struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
Mayank Ranaff398d02012-12-18 10:22:50 +05302763
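	/* When probed from device tree, build platform data from the DT node */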
2764 if (pdev->dev.of_node) {
2765 dev_dbg(&pdev->dev, "device tree enabled\n");
2766 pdata = msm_hs_dt_to_pdata(pdev);
2767 if (IS_ERR(pdata))
2768 return PTR_ERR(pdata);
2769
Saket Saurabh10e88b32013-02-04 15:26:34 +05302770 if (pdev->id == -1) {
2771 pdev->id = atomic_inc_return(&msm_serial_hs_next_id)-1;
2772 deviceid[pdev->id] = 1;
2773 }
2774
2775 /* Use alias from device tree if present
2776 * Alias is used as an optional property
2777 */
2778 alias_num = of_alias_get_id(pdev->dev.of_node, "uart");
2779 if (alias_num >= 0) {
2780			/* If alias_num is between 0 and 11, check that it is
2781			 * not equal to a previously assigned pdev id. If it is
2782			 * already in use, fail the device probe.
2783 */
2784 if (alias_num < BLSP_UART_NR) {
2785 if (deviceid[alias_num] == 0) {
2786 pdev->id = alias_num;
2787 } else {
2788 pr_err("alias_num=%d already used\n",
2789 alias_num);
2790 return -EINVAL;
2791 }
2792 } else {
2793 pdev->id = alias_num;
2794 }
2795 }
Mayank Ranaff398d02012-12-18 10:22:50 +05302796
2797 pdev->dev.platform_data = pdata;
2798 }
Mayank Rana55046232011-03-07 10:28:42 +05302799
2800 if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
Mayank Ranaff398d02012-12-18 10:22:50 +05302801		pr_err("Invalid platform device ID = %d\n", pdev->id);
Mayank Rana55046232011-03-07 10:28:42 +05302802 return -EINVAL;
2803 }
2804
2805 msm_uport = &q_uart_port[pdev->id];
2806 uport = &msm_uport->uport;
Mayank Rana55046232011-03-07 10:28:42 +05302807 uport->dev = &pdev->dev;
2808
Mayank Ranaff398d02012-12-18 10:22:50 +05302809 if (pdev->dev.of_node)
2810 msm_uport->uart_type = BLSP_HSUART;
Mayank Rana55046232011-03-07 10:28:42 +05302811
Mayank Ranaff398d02012-12-18 10:22:50 +05302812 /* Get required resources for BAM HSUART */
2813 if (is_blsp_uart(msm_uport)) {
2814 core_resource = platform_get_resource_byname(pdev,
2815 IORESOURCE_MEM, "core_mem");
2816 bam_resource = platform_get_resource_byname(pdev,
2817 IORESOURCE_MEM, "bam_mem");
2818 core_irqres = platform_get_irq_byname(pdev, "core_irq");
2819 bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002820
Mayank Ranaff398d02012-12-18 10:22:50 +05302821 if (!core_resource) {
2822 pr_err("Invalid core HSUART Resources.\n");
2823 return -ENXIO;
2824 }
2825
2826 if (!bam_resource) {
2827 pr_err("Invalid BAM HSUART Resources.\n");
2828 return -ENXIO;
2829 }
2830
2831 if (!core_irqres) {
2832 pr_err("Invalid core irqres Resources.\n");
2833 return -ENXIO;
2834 }
2835 if (!bam_irqres) {
2836 pr_err("Invalid bam irqres Resources.\n");
2837 return -ENXIO;
2838 }
2839
2840 uport->mapbase = core_resource->start;
2841
2842 uport->membase = ioremap(uport->mapbase,
2843 resource_size(core_resource));
2844 if (unlikely(!uport->membase)) {
2845 pr_err("UART Resource ioremap Failed.\n");
2846 return -ENOMEM;
2847 }
2848 msm_uport->bam_mem = bam_resource->start;
2849 msm_uport->bam_base = ioremap(msm_uport->bam_mem,
2850 resource_size(bam_resource));
2851 if (unlikely(!msm_uport->bam_base)) {
2852 pr_err("UART BAM Resource ioremap Failed.\n");
2853 iounmap(uport->membase);
2854 return -ENOMEM;
2855 }
2856
2857 uport->irq = core_irqres;
2858 msm_uport->bam_irq = bam_irqres;
2859
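		/* Register with the bus scaling driver so bandwidth can be voted at runtime */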
Mayank Rana88d49142013-01-16 17:28:53 +05302860 msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2861 if (!msm_uport->bus_scale_table) {
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302862 pr_err("BLSP UART: Bus scaling is disabled.\n");
Mayank Rana88d49142013-01-16 17:28:53 +05302863 } else {
2864 msm_uport->bus_perf_client =
2865 msm_bus_scale_register_client
2866 (msm_uport->bus_scale_table);
2867			if (!msm_uport->bus_perf_client) {
2868 pr_err("%s(): Bus client register failed.\n",
2869 __func__);
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302870 ret = -EINVAL;
Mayank Rana88d49142013-01-16 17:28:53 +05302871 goto unmap_memory;
2872 }
2873 }
Mayank Ranaff398d02012-12-18 10:22:50 +05302874 } else {
2875
2876 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2877 if (unlikely(!resource))
2878 return -ENXIO;
2879 uport->mapbase = resource->start;
2880 uport->membase = ioremap(uport->mapbase,
2881 resource_size(resource));
2882 if (unlikely(!uport->membase))
2883 return -ENOMEM;
2884
2885 uport->irq = platform_get_irq(pdev, 0);
2886 if (unlikely((int)uport->irq < 0)) {
2887 pr_err("UART IRQ Failed.\n");
2888 iounmap(uport->membase);
2889 return -ENXIO;
2890 }
2891 }
Mayank Rana55046232011-03-07 10:28:42 +05302892
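	/* Optional RX wakeup IRQ and injected-byte settings come from platform data */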
Mayank Rana55046232011-03-07 10:28:42 +05302893 if (pdata == NULL)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002894 msm_uport->wakeup.irq = -1;
2895 else {
2896 msm_uport->wakeup.irq = pdata->wakeup_irq;
2897 msm_uport->wakeup.ignore = 1;
2898 msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
2899 msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
2900
Mayank Ranaff398d02012-12-18 10:22:50 +05302901 if (unlikely(msm_uport->wakeup.irq < 0)) {
2902 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302903 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302904 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002905
Mayank Ranaff398d02012-12-18 10:22:50 +05302906 if (is_blsp_uart(msm_uport)) {
2907 msm_uport->bam_tx_ep_pipe_index =
2908 pdata->bam_tx_ep_pipe_index;
2909 msm_uport->bam_rx_ep_pipe_index =
2910 pdata->bam_rx_ep_pipe_index;
2911 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002912 }
Mayank Rana55046232011-03-07 10:28:42 +05302913
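	/* Non-BLSP ports use ADM data mover channels and CRCIs instead of BAM pipes */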
Mayank Ranaff398d02012-12-18 10:22:50 +05302914 if (!is_blsp_uart(msm_uport)) {
Mayank Rana55046232011-03-07 10:28:42 +05302915
Mayank Ranaff398d02012-12-18 10:22:50 +05302916 resource = platform_get_resource_byname(pdev,
2917 IORESOURCE_DMA, "uartdm_channels");
2918 if (unlikely(!resource)) {
2919 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302920 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302921 }
2922
2923 msm_uport->dma_tx_channel = resource->start;
2924 msm_uport->dma_rx_channel = resource->end;
2925
2926 resource = platform_get_resource_byname(pdev,
2927 IORESOURCE_DMA, "uartdm_crci");
2928 if (unlikely(!resource)) {
2929 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302930 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302931 }
2932
2933 msm_uport->dma_tx_crci = resource->start;
2934 msm_uport->dma_rx_crci = resource->end;
2935 }
Mayank Rana55046232011-03-07 10:28:42 +05302936
2937 uport->iotype = UPIO_MEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002938 uport->fifosize = 64;
Mayank Rana55046232011-03-07 10:28:42 +05302939 uport->ops = &msm_hs_ops;
2940 uport->flags = UPF_BOOT_AUTOCONF;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002941 uport->uartclk = 7372800;
Mayank Rana55046232011-03-07 10:28:42 +05302942 msm_uport->imr_reg = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002943
Matt Wagantalle2522372011-08-17 14:52:21 -07002944 msm_uport->clk = clk_get(&pdev->dev, "core_clk");
Mayank Ranaff398d02012-12-18 10:22:50 +05302945 if (IS_ERR(msm_uport->clk)) {
2946 ret = PTR_ERR(msm_uport->clk);
Mayank Rana43c8baa2013-02-23 14:57:14 +05302947 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302948 }
Mayank Rana55046232011-03-07 10:28:42 +05302949
Matt Wagantalle2522372011-08-17 14:52:21 -07002950 msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002951 /*
2952 * Some configurations do not require explicit pclk control so
2953 * do not flag error on pclk get failure.
2954 */
2955 if (IS_ERR(msm_uport->pclk))
2956 msm_uport->pclk = NULL;
2957
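	/* Run the core clock at the default UART reference rate of 7.3728 MHz */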
2958 ret = clk_set_rate(msm_uport->clk, uport->uartclk);
2959 if (ret) {
2960 printk(KERN_WARNING "Error setting clock rate on UART\n");
Mayank Rana43c8baa2013-02-23 14:57:14 +05302961 goto put_clk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002962 }
2963
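	/* Workqueue for deferred clock-off and RX endpoint disconnect work */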
Mayank Ranacb589d82012-03-01 11:50:03 +05302964 msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
2965 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
2966 if (!msm_uport->hsuart_wq) {
2967 pr_err("%s(): Unable to create workqueue hsuart_wq\n",
2968 __func__);
Mayank Ranaff398d02012-12-18 10:22:50 +05302969 ret = -ENOMEM;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302970 goto put_clk;
Mayank Ranacb589d82012-03-01 11:50:03 +05302971 }
2972
2973 INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302974
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302975 /* Init work for sps_disconnect in stop_rx_locked */
2976 INIT_WORK(&msm_uport->disconnect_rx_endpoint,
2977 hsuart_disconnect_rx_endpoint_work);
Mayank Ranacb589d82012-03-01 11:50:03 +05302978 mutex_init(&msm_uport->clk_mutex);
2979
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302980 /* Initialize SPS HW connected with UART core */
2981 if (is_blsp_uart(msm_uport)) {
2982 ret = msm_hs_sps_init(msm_uport);
2983 if (unlikely(ret)) {
2984			pr_err("SPS initialization failed! err=%d\n", ret);
Mayank Rana43c8baa2013-02-23 14:57:14 +05302985 goto destroy_mutex;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302986 }
2987 }
2988
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302989 msm_hs_bus_voting(msm_uport, BUS_SCALING);
2990
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07002991 clk_prepare_enable(msm_uport->clk);
2992 if (msm_uport->pclk)
2993 clk_prepare_enable(msm_uport->pclk);
2994
Mayank Rana55046232011-03-07 10:28:42 +05302995 ret = uartdm_init_port(uport);
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07002996 if (unlikely(ret)) {
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05302997 goto err_clock;
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07002998 }
Mayank Rana55046232011-03-07 10:28:42 +05302999
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003000 /* configure the CR Protection to Enable */
3001 msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003002
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003003
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003004 /*
3005	 * Enable command register protection before going ahead, as this hw
3006	 * configuration ensures that a command issued to the CR register
3007	 * completes before the next command is issued. Hence mb() is required here.
3008 */
3009 mb();
Mayank Rana55046232011-03-07 10:28:42 +05303010
3011 msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
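	/* Timer that retries turning the UART clock off while the port is still busy */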
3012 hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
3013 HRTIMER_MODE_REL);
3014 msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
3015 msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */
3016
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003017 ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
3018 if (unlikely(ret))
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303019 goto err_clock;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003020
3021 msm_serial_debugfs_init(msm_uport, pdev->id);
3022
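	/* The tty line defaults to the pdev id; platform data may override it via userid */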
Mayank Rana55046232011-03-07 10:28:42 +05303023 uport->line = pdev->id;
Saket Saurabh51690e52012-08-17 14:17:46 +05303024 if (pdata != NULL && pdata->userid && pdata->userid <= UARTDM_NR)
3025 uport->line = pdata->userid;
Mayank Ranaff398d02012-12-18 10:22:50 +05303026 ret = uart_add_one_port(&msm_hs_driver, uport);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303027 if (!ret) {
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303028 msm_hs_bus_voting(msm_uport, BUS_RESET);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303029 clk_disable_unprepare(msm_uport->clk);
3030 if (msm_uport->pclk)
3031 clk_disable_unprepare(msm_uport->pclk);
Mayank Ranaff398d02012-12-18 10:22:50 +05303032 return ret;
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303033 }
Mayank Ranaff398d02012-12-18 10:22:50 +05303034
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303035err_clock:
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303036
3037 msm_hs_bus_voting(msm_uport, BUS_RESET);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303038 clk_disable_unprepare(msm_uport->clk);
3039 if (msm_uport->pclk)
3040 clk_disable_unprepare(msm_uport->pclk);
Mayank Rana43c8baa2013-02-23 14:57:14 +05303041
3042destroy_mutex:
3043 mutex_destroy(&msm_uport->clk_mutex);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303044 destroy_workqueue(msm_uport->hsuart_wq);
Mayank Rana43c8baa2013-02-23 14:57:14 +05303045
3046put_clk:
3047 if (msm_uport->pclk)
3048 clk_put(msm_uport->pclk);
3049
3050 if (msm_uport->clk)
3051 clk_put(msm_uport->clk);
3052
3053deregister_bus_client:
3054 if (is_blsp_uart(msm_uport))
3055 msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
Mayank Ranaff398d02012-12-18 10:22:50 +05303056unmap_memory:
3057 iounmap(uport->membase);
3058 if (is_blsp_uart(msm_uport))
3059 iounmap(msm_uport->bam_base);
3060
3061 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303062}
3063
3064static int __init msm_serial_hs_init(void)
3065{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003066 int ret;
3067 int i;
Mayank Rana55046232011-03-07 10:28:42 +05303068
3069 /* Init all UARTS as non-configured */
3070 for (i = 0; i < UARTDM_NR; i++)
3071 q_uart_port[i].uport.type = PORT_UNKNOWN;
3072
Mayank Rana55046232011-03-07 10:28:42 +05303073 ret = uart_register_driver(&msm_hs_driver);
3074 if (unlikely(ret)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003075		printk(KERN_ERR "%s failed to load\n", __func__);
3076 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303077 }
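	/* debugfs support is optional; failing to create the directory is not fatal */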
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003078 debug_base = debugfs_create_dir("msm_serial_hs", NULL);
3079 if (IS_ERR_OR_NULL(debug_base))
3080 pr_info("msm_serial_hs: Cannot create debugfs dir\n");
Mayank Rana55046232011-03-07 10:28:42 +05303081
3082 ret = platform_driver_register(&msm_serial_hs_platform_driver);
3083 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003084		printk(KERN_ERR "%s failed to load\n", __func__);
3085 debugfs_remove_recursive(debug_base);
3086 uart_unregister_driver(&msm_hs_driver);
3087 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303088 }
3089
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003090 printk(KERN_INFO "msm_serial_hs module loaded\n");
Mayank Rana55046232011-03-07 10:28:42 +05303091 return ret;
3092}
Mayank Rana55046232011-03-07 10:28:42 +05303093
3094/*
3095 * Called by the upper layer when port is closed.
3096 * - Disables the port
3097 * - Unhook the ISR
3098 */
3099static void msm_hs_shutdown(struct uart_port *uport)
3100{
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303101 int ret;
3102 unsigned int data;
3103 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05303104 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana40836782012-11-16 14:45:47 +05303105 struct platform_device *pdev = to_platform_device(uport->dev);
3106 const struct msm_serial_hs_platform_data *pdata =
3107 pdev->dev.platform_data;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303108 struct msm_hs_tx *tx = &msm_uport->tx;
3109 struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05303110
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303111 if (msm_uport->tx.dma_in_flight) {
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303112 if (!is_blsp_uart(msm_uport)) {
3113 spin_lock_irqsave(&uport->lock, flags);
3114 /* disable UART TX interface to DM */
3115 data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
3116 data &= ~UARTDM_TX_DM_EN_BMSK;
3117 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
3118 /* turn OFF UART Transmitter */
3119 msm_hs_write(uport, UARTDM_CR_ADDR,
3120 UARTDM_CR_TX_DISABLE_BMSK);
3121 /* reset UART TX */
3122 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
3123 /* reset UART TX Error */
3124 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX_ERROR);
3125 msm_uport->tx.flush = FLUSH_STOP;
3126 spin_unlock_irqrestore(&uport->lock, flags);
3127 /* discard flush */
3128 msm_dmov_flush(msm_uport->dma_tx_channel, 0);
3129 ret = wait_event_timeout(msm_uport->tx.wait,
3130 msm_uport->tx.flush == FLUSH_SHUTDOWN, 100);
3131 if (!ret)
3132 pr_err("%s():HSUART TX Stalls.\n", __func__);
3133 } else {
3134 /* BAM Disconnect for TX */
3135 sps_disconnect(sps_pipe_handle);
3136 }
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303137 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003138 tasklet_kill(&msm_uport->tx.tlet);
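	/* RX must already be stopping here; wait for it to finish shutting down */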
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303139 BUG_ON(msm_uport->rx.flush < FLUSH_STOP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003140 wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
3141 tasklet_kill(&msm_uport->rx.tlet);
3142 cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
Mayank Ranacb589d82012-03-01 11:50:03 +05303143 flush_workqueue(msm_uport->hsuart_wq);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003144 pm_runtime_disable(uport->dev);
3145 pm_runtime_set_suspended(uport->dev);
Mayank Rana55046232011-03-07 10:28:42 +05303146
3147 /* Disable the transmitter */
3148 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
3149 /* Disable the receiver */
3150 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);
3151
Mayank Rana55046232011-03-07 10:28:42 +05303152 msm_uport->imr_reg = 0;
3153 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003154 /*
3155	 * Complete all device writes before actually disabling uartclk.
3156	 * Hence mb() is required here.
3157 */
3158 mb();
Mayank Rana88d49142013-01-16 17:28:53 +05303159
3160 /* Reset PNOC Bus Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303161 msm_hs_bus_voting(msm_uport, BUS_RESET);
Mayank Rana88d49142013-01-16 17:28:53 +05303162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003163 if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
Mayank Ranacb589d82012-03-01 11:50:03 +05303164 /* to balance clk_state */
3165 clk_disable_unprepare(msm_uport->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003166 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05303167 clk_disable_unprepare(msm_uport->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003168 wake_unlock(&msm_uport->dma_wake_lock);
3169 }
Mayank Rana55046232011-03-07 10:28:42 +05303170
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303171 msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
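	/* Release the DMA mapping of the TX transmit buffer */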
Mayank Rana55046232011-03-07 10:28:42 +05303172 dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
3173 UART_XMIT_SIZE, DMA_TO_DEVICE);
3174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003175 if (use_low_power_wakeup(msm_uport))
3176 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
Mayank Rana55046232011-03-07 10:28:42 +05303177
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003178 /* Free the interrupt */
3179 free_irq(uport->irq, msm_uport);
3180 if (use_low_power_wakeup(msm_uport))
3181 free_irq(msm_uport->wakeup.irq, msm_uport);
Mayank Rana40836782012-11-16 14:45:47 +05303182
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05303183 if (is_blsp_uart(msm_uport)) {
3184 msm_hs_unconfig_uart_gpios(uport);
3185 } else {
3186 if (pdata && pdata->gpio_config)
3187 if (pdata->gpio_config(0))
3188 dev_err(uport->dev, "GPIO config error\n");
3189 }
Mayank Rana55046232011-03-07 10:28:42 +05303190}
3191
3192static void __exit msm_serial_hs_exit(void)
3193{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003194 printk(KERN_INFO "msm_serial_hs module removed\n");
Mayank Rana17e0e1a2012-04-07 02:10:33 +05303195 debugfs_remove_recursive(debug_base);
Mayank Rana55046232011-03-07 10:28:42 +05303196 platform_driver_unregister(&msm_serial_hs_platform_driver);
3197 uart_unregister_driver(&msm_hs_driver);
3198}
Mayank Rana55046232011-03-07 10:28:42 +05303199
Mayank Rana55046232011-03-07 10:28:42 +05303200static int msm_hs_runtime_idle(struct device *dev)
3201{
3202 /*
3203	 * Returning success from idle results in runtime suspend being
3204	 * called.
3205 */
3206 return 0;
3207}
3208
3209static int msm_hs_runtime_resume(struct device *dev)
3210{
3211	struct platform_device *pdev = to_platform_device(dev);
3213 struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
Mayank Rana55046232011-03-07 10:28:42 +05303214 msm_hs_request_clock_on(&msm_uport->uport);
3215 return 0;
3216}
3217
3218static int msm_hs_runtime_suspend(struct device *dev)
3219{
3220	struct platform_device *pdev = to_platform_device(dev);
3222 struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
Mayank Rana55046232011-03-07 10:28:42 +05303223 msm_hs_request_clock_off(&msm_uport->uport);
3224 return 0;
3225}
Mayank Rana55046232011-03-07 10:28:42 +05303226
3227static const struct dev_pm_ops msm_hs_dev_pm_ops = {
3228 .runtime_suspend = msm_hs_runtime_suspend,
3229 .runtime_resume = msm_hs_runtime_resume,
3230 .runtime_idle = msm_hs_runtime_idle,
3231};
3232
Mayank Ranaff398d02012-12-18 10:22:50 +05303233static struct of_device_id msm_hs_match_table[] = {
3234 { .compatible = "qcom,msm-hsuart-v14" },
3235 {}
3236};
3237
Mayank Rana55046232011-03-07 10:28:42 +05303238static struct platform_driver msm_serial_hs_platform_driver = {
Mayank Rana17e0e1a2012-04-07 02:10:33 +05303239 .probe = msm_hs_probe,
Mayank Rana55046232011-03-07 10:28:42 +05303240 .remove = __devexit_p(msm_hs_remove),
3241 .driver = {
3242 .name = "msm_serial_hs",
Mayank Rana55046232011-03-07 10:28:42 +05303243 .pm = &msm_hs_dev_pm_ops,
Mayank Ranaff398d02012-12-18 10:22:50 +05303244 .of_match_table = msm_hs_match_table,
Mayank Rana55046232011-03-07 10:28:42 +05303245 },
3246};
3247
3248static struct uart_driver msm_hs_driver = {
3249 .owner = THIS_MODULE,
3250 .driver_name = "msm_serial_hs",
3251 .dev_name = "ttyHS",
3252 .nr = UARTDM_NR,
3253 .cons = 0,
3254};
3255
3256static struct uart_ops msm_hs_ops = {
3257 .tx_empty = msm_hs_tx_empty,
3258 .set_mctrl = msm_hs_set_mctrl_locked,
3259 .get_mctrl = msm_hs_get_mctrl_locked,
3260 .stop_tx = msm_hs_stop_tx_locked,
3261 .start_tx = msm_hs_start_tx_locked,
3262 .stop_rx = msm_hs_stop_rx_locked,
3263 .enable_ms = msm_hs_enable_ms_locked,
3264 .break_ctl = msm_hs_break_ctl,
3265 .startup = msm_hs_startup,
3266 .shutdown = msm_hs_shutdown,
3267 .set_termios = msm_hs_set_termios,
Mayank Rana55046232011-03-07 10:28:42 +05303268 .type = msm_hs_type,
3269 .config_port = msm_hs_config_port,
3270 .release_port = msm_hs_release_port,
3271 .request_port = msm_hs_request_port,
Saket Saurabhce394102012-10-29 19:51:28 +05303272 .flush_buffer = msm_hs_flush_buffer,
Mayank Rana55046232011-03-07 10:28:42 +05303273};
3274
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003275module_init(msm_serial_hs_init);
3276module_exit(msm_serial_hs_exit);
Mayank Rana55046232011-03-07 10:28:42 +05303277MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
3278MODULE_VERSION("1.2");
3279MODULE_LICENSE("GPL v2");