/* drivers/serial/msm_serial_hs.c
 *
 * MSM 7k High speed uart driver
 *
 * Copyright (c) 2008 Google Inc.
 * Copyright (c) 2007-2013, The Linux Foundation. All rights reserved.
 * Modified: Nick Pelly <npelly@google.com>
 *
 * All source code in this file is licensed under the following license
 * except where indicated.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * Has optional support for uart power management independent of linux
 * suspend/resume:
 *
 * RX wakeup.
 * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
 * UART RX pin). This should only be used if there is not a wakeup
 * GPIO on the UART CTS, and the first RX byte is known (for example, with the
 * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
 * always be lost. RTS will be asserted even while the UART is off in this mode
 * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
 */

#include <linux/module.h>

#include <linux/serial.h>
#include <linux/serial_core.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/tty_flip.h>
#include <linux/wait.h>
#include <linux/sysfs.h>
#include <linux/stat.h>
#include <linux/device.h>
#include <linux/wakelock.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/gpio.h>
#include <asm/atomic.h>
#include <asm/irq.h>

#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/sps.h>
#include <mach/msm_serial_hs.h>
#include <mach/msm_bus.h>

#include "msm_serial_hs_hwreg.h"
#define UART_SPS_CONS_PERIPHERAL 0
#define UART_SPS_PROD_PERIPHERAL 1

static int hs_serial_debug_mask = 1;
module_param_named(debug_mask, hs_serial_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
/*
 * There are 3 different kinds of UART core available on MSM:
 * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
 * and BLSP based HSUART.
 */
enum uart_core_type {
	LEGACY_HSUART,
	GSBI_HSUART,
	BLSP_HSUART,
};

enum flush_reason {
	FLUSH_NONE,
	FLUSH_DATA_READY,
	FLUSH_DATA_INVALID,  /* values after this indicate invalid data */
	FLUSH_IGNORE = FLUSH_DATA_INVALID,
	FLUSH_STOP,
	FLUSH_SHUTDOWN,
};

enum msm_hs_clk_states_e {
	MSM_HS_CLK_PORT_OFF,     /* port not in use */
	MSM_HS_CLK_OFF,          /* clock disabled */
	MSM_HS_CLK_REQUEST_OFF,  /* disable after TX and RX flushed */
	MSM_HS_CLK_ON,           /* clock enabled */
};

/* Track the forced RXSTALE flush during clock off sequence.
 * These states are only valid during MSM_HS_CLK_REQUEST_OFF */
enum msm_hs_clk_req_off_state_e {
	CLK_REQ_OFF_START,
	CLK_REQ_OFF_RXSTALE_ISSUED,
	CLK_REQ_OFF_FLUSH_ISSUED,
	CLK_REQ_OFF_RXSTALE_FLUSHED,
};
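
/*
 * Rough sketch of the intended clock-off flow, inferred from the state names
 * and comments above (not from hardware documentation): a request moves the
 * port from MSM_HS_CLK_ON to MSM_HS_CLK_REQUEST_OFF; the forced RXSTALE flush
 * then advances CLK_REQ_OFF_START -> CLK_REQ_OFF_RXSTALE_ISSUED ->
 * CLK_REQ_OFF_FLUSH_ISSUED -> CLK_REQ_OFF_RXSTALE_FLUSHED, and only after TX
 * and RX are flushed does the driver drop the state to MSM_HS_CLK_OFF.
 */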

/* SPS data structures to support HSUART with BAM
 * @sps_pipe - This struct defines BAM pipe descriptor
 * @sps_connect - This struct defines a connection's end point
 * @sps_register - This struct defines the event registration parameters
 */
struct msm_hs_sps_ep_conn_data {
	struct sps_pipe *pipe_handle;
	struct sps_connect config;
	struct sps_register_event event;
};

struct msm_hs_tx {
	unsigned int tx_ready_int_en;  /* ok to dma more tx */
	unsigned int dma_in_flight;    /* tx dma in progress */
	enum flush_reason flush;
	wait_queue_head_t wait;
	struct msm_dmov_cmd xfer;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	dma_addr_t mapped_cmd_ptr_ptr;
	int tx_count;
	dma_addr_t dma_base;
	struct tasklet_struct tlet;
	struct msm_hs_sps_ep_conn_data cons;
};

struct msm_hs_rx {
	enum flush_reason flush;
	struct msm_dmov_cmd xfer;
	dma_addr_t cmdptr_dmaaddr;
	dmov_box *command_ptr;
	u32 *command_ptr_ptr;
	dma_addr_t mapped_cmd_ptr;
	wait_queue_head_t wait;
	dma_addr_t rbuffer;
	unsigned char *buffer;
	unsigned int buffer_pending;
	struct dma_pool *pool;
	struct wake_lock wake_lock;
	struct delayed_work flip_insert_work;
	struct tasklet_struct tlet;
	struct msm_hs_sps_ep_conn_data prod;
};

enum buffer_states {
	NONE_PENDING = 0x0,
	FIFO_OVERRUN = 0x1,
	PARITY_ERROR = 0x2,
	CHARS_NORMAL = 0x4,
};

/* optional low power wakeup, typically on a GPIO RX irq */
struct msm_hs_wakeup {
	int irq;  /* < 0 indicates low power wakeup disabled */
	unsigned char ignore;  /* bool */

	/* bool: inject char into rx tty on wakeup */
	unsigned char inject_rx;
	char rx_to_inject;
};

struct msm_hs_port {
	struct uart_port uport;
	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
	struct clk *clk;
	struct clk *pclk;
	struct msm_hs_tx tx;
	struct msm_hs_rx rx;
	/* gsbi uarts have to do additional writes to gsbi memory */
	/* block and top control status block. The following pointers */
	/* keep a handle to these blocks. */
	unsigned char __iomem *mapped_gsbi;
	int dma_tx_channel;
	int dma_rx_channel;
	int dma_tx_crci;
	int dma_rx_crci;
	struct hrtimer clk_off_timer;  /* to poll TXEMT before clock off */
	ktime_t clk_off_delay;
	enum msm_hs_clk_states_e clk_state;
	enum msm_hs_clk_req_off_state_e clk_req_off_state;

	struct msm_hs_wakeup wakeup;
	struct wake_lock dma_wake_lock;  /* held while any DMA active */

	struct dentry *loopback_dir;
	struct work_struct clock_off_w; /* work for actual clock off */
	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
	struct mutex clk_mutex; /* mutex to guard against clock off/clock on */
	struct work_struct disconnect_rx_endpoint; /* disconnect rx_endpoint */
	bool tty_flush_receive;
	enum uart_core_type uart_type;
	u32 bam_handle;
	resource_size_t bam_mem;
	int bam_irq;
	unsigned char __iomem *bam_base;
	unsigned int bam_tx_ep_pipe_index;
	unsigned int bam_rx_ep_pipe_index;
	/* struct sps_event_notify is an argument passed when triggering a
	 * callback event object registered for an SPS connection end point.
	 */
	struct sps_event_notify notify;
	/* bus client handler */
	u32 bus_perf_client;
	/* BUS scaling data required by the BLSP UART */
	struct msm_bus_scale_pdata *bus_scale_table;
	bool rx_discard_flush_issued;
	int rx_count_callback;
};

#define MSM_UARTDM_BURST_SIZE 16   /* DM burst size (in bytes) */
#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
#define UARTDM_RX_BUF_SIZE 512
#define RETRY_TIMEOUT 5
#define UARTDM_NR 256
#define BAM_PIPE_MIN 0
#define BAM_PIPE_MAX 11
#define BUS_SCALING 1
#define BUS_RESET 0
#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
#define BLSP_UART_CLK_FMAX 63160000

static struct dentry *debug_base;
static struct msm_hs_port q_uart_port[UARTDM_NR];
static struct platform_driver msm_serial_hs_platform_driver;
static struct uart_driver msm_hs_driver;
static struct uart_ops msm_hs_ops;
static void msm_hs_start_rx_locked(struct uart_port *uport);
static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr);
static void flip_insert_work(struct work_struct *work);

#define UARTDM_TO_MSM(uart_port) \
	container_of((uart_port), struct msm_hs_port, uport)

static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	int state = 1;
	enum msm_hs_clk_states_e clk_state;
	unsigned long flags;

	struct platform_device *pdev = container_of(dev, struct
						    platform_device, dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	clk_state = msm_uport->clk_state;
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);

	if (clk_state <= MSM_HS_CLK_OFF)
		state = 0;

	return snprintf(buf, PAGE_SIZE, "%d\n", state);
}

static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
			 const char *buf, size_t count)
{
	int state;
	struct platform_device *pdev = container_of(dev, struct
						    platform_device, dev);
	struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];

	state = buf[0] - '0';
	switch (state) {
	case 0: {
		msm_hs_request_clock_off(&msm_uport->uport);
		break;
	}
	case 1: {
		msm_hs_request_clock_on(&msm_uport->uport);
		break;
	}
	default: {
		return -EINVAL;
	}
	}
	return count;
}

static DEVICE_ATTR(clock, S_IWUSR | S_IRUGO, show_clock, set_clock);
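
/*
 * Example usage of the "clock" attribute from user space. The exact sysfs
 * path depends on how the platform device is named/numbered, so the path
 * below is only an assumption:
 *
 *   cat /sys/devices/platform/msm_serial_hs.0/clock         # prints 1 while the clock is on
 *   echo 0 > /sys/devices/platform/msm_serial_hs.0/clock    # request clock off
 *   echo 1 > /sys/devices/platform/msm_serial_hs.0/clock    # request clock on
 */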

static inline unsigned int use_low_power_wakeup(struct msm_hs_port *msm_uport)
{
	return (msm_uport->wakeup.irq > 0);
}

static inline int is_gsbi_uart(struct msm_hs_port *msm_uport)
{
	/* assume gsbi uart if gsbi resource found in pdata */
	return ((msm_uport->mapped_gsbi != NULL));
}
static unsigned int is_blsp_uart(struct msm_hs_port *msm_uport)
{
	return (msm_uport->uart_type == BLSP_HSUART);
}

static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
{
	int ret;

	if (is_blsp_uart(msm_uport) && msm_uport->bus_perf_client) {
		pr_debug("Bus voting:%d\n", vote);
		ret = msm_bus_scale_client_update_request(
				msm_uport->bus_perf_client, vote);
		if (ret)
			pr_err("%s(): Failed for Bus voting: %d\n",
							__func__, vote);
	}
}

static inline unsigned int msm_hs_read(struct uart_port *uport,
				       unsigned int offset)
{
	return readl_relaxed(uport->membase + offset);
}

static inline void msm_hs_write(struct uart_port *uport, unsigned int offset,
				unsigned int value)
{
	writel_relaxed(value, uport->membase + offset);
}

static void msm_hs_release_port(struct uart_port *port)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *gsbi_resource;
	resource_size_t size;

	if (is_gsbi_uart(msm_uport)) {
		iowrite32(GSBI_PROTOCOL_IDLE, msm_uport->mapped_gsbi +
			  GSBI_CONTROL_ADDR);
		gsbi_resource = platform_get_resource_byname(pdev,
							     IORESOURCE_MEM,
							     "gsbi_resource");
		if (unlikely(!gsbi_resource))
			return;

		size = resource_size(gsbi_resource);
		release_mem_region(gsbi_resource->start, size);
		iounmap(msm_uport->mapped_gsbi);
		msm_uport->mapped_gsbi = NULL;
	}
}

static int msm_hs_request_port(struct uart_port *port)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(port);
	struct platform_device *pdev = to_platform_device(port->dev);
	struct resource *gsbi_resource;
	resource_size_t size;

	gsbi_resource = platform_get_resource_byname(pdev,
						     IORESOURCE_MEM,
						     "gsbi_resource");
	if (gsbi_resource) {
		size = resource_size(gsbi_resource);
		if (unlikely(!request_mem_region(gsbi_resource->start, size,
						 "msm_serial_hs")))
			return -EBUSY;
		msm_uport->mapped_gsbi = ioremap(gsbi_resource->start,
						 size);
		if (!msm_uport->mapped_gsbi) {
			release_mem_region(gsbi_resource->start, size);
			return -EBUSY;
		}
	}
	/* no gsbi uart */
	return 0;
}

static int msm_serial_loopback_enable_set(void *data, u64 val)
{
	struct msm_hs_port *msm_uport = data;
	struct uart_port *uport = &(msm_uport->uport);
	unsigned long flags;
	int ret = 0;

	msm_hs_bus_voting(msm_uport, BUS_SCALING);

	clk_prepare_enable(msm_uport->clk);
	if (msm_uport->pclk)
		clk_prepare_enable(msm_uport->pclk);

	if (val) {
		spin_lock_irqsave(&uport->lock, flags);
		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
		if (is_blsp_uart(msm_uport))
			ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
				UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
		else
			ret |= UARTDM_MR2_LOOP_MODE_BMSK;
		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
		spin_unlock_irqrestore(&uport->lock, flags);
	} else {
		spin_lock_irqsave(&uport->lock, flags);
		ret = msm_hs_read(uport, UARTDM_MR2_ADDR);
		if (is_blsp_uart(msm_uport))
			ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
				UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
		else
			ret &= ~UARTDM_MR2_LOOP_MODE_BMSK;
		msm_hs_write(uport, UARTDM_MR2_ADDR, ret);
		spin_unlock_irqrestore(&uport->lock, flags);
	}
	/* Calling CLOCK API. Hence mb() is required here. */
	mb();
	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);

	msm_hs_bus_voting(msm_uport, BUS_RESET);
	return 0;
}

static int msm_serial_loopback_enable_get(void *data, u64 *val)
{
	struct msm_hs_port *msm_uport = data;
	struct uart_port *uport = &(msm_uport->uport);
	unsigned long flags;
	int ret = 0;

	msm_hs_bus_voting(msm_uport, BUS_SCALING);

	clk_prepare_enable(msm_uport->clk);
	if (msm_uport->pclk)
		clk_prepare_enable(msm_uport->pclk);

	spin_lock_irqsave(&uport->lock, flags);
	ret = msm_hs_read(&msm_uport->uport, UARTDM_MR2_ADDR);
	spin_unlock_irqrestore(&uport->lock, flags);

	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);

	*val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;

	msm_hs_bus_voting(msm_uport, BUS_RESET);
	return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
			msm_serial_loopback_enable_set, "%llu\n");

/*
 * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
 * writing 1 turns on internal loopback mode in HW. Useful for automation
 * test scripts.
 * writing 0 disables the internal loopback mode. Default is disabled.
 */
static void __devinit msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
					      int id)
{
	char node_name[15];

	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
	msm_uport->loopback_dir = debugfs_create_file(node_name,
						      S_IRUGO | S_IWUSR,
						      debug_base,
						      msm_uport,
						      &loopback_enable_fops);

	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
		pr_err("%s(): Cannot create loopback.%d debug entry",
							__func__, id);
}
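
/*
 * Example (assuming debugfs is mounted at /sys/kernel/debug):
 *
 *   echo 1 > /sys/kernel/debug/msm_serial_hs/loopback.0   # enable HW loopback
 *   echo 0 > /sys/kernel/debug/msm_serial_hs/loopback.0   # disable it again
 */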

static int __devexit msm_hs_remove(struct platform_device *pdev)
{
	struct msm_hs_port *msm_uport;
	struct device *dev;

	if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
		printk(KERN_ERR "Invalid platform device ID = %d\n", pdev->id);
		return -EINVAL;
	}

	msm_uport = &q_uart_port[pdev->id];
	dev = msm_uport->uport.dev;

	sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
	debugfs_remove(msm_uport->loopback_dir);

	dma_unmap_single(dev, msm_uport->rx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);
	dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
		      msm_uport->rx.rbuffer);
	dma_pool_destroy(msm_uport->rx.pool);

	dma_unmap_single(dev, msm_uport->rx.cmdptr_dmaaddr, sizeof(u32),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr_ptr, sizeof(u32),
			 DMA_TO_DEVICE);
	dma_unmap_single(dev, msm_uport->tx.mapped_cmd_ptr, sizeof(dmov_box),
			 DMA_TO_DEVICE);

	wake_lock_destroy(&msm_uport->rx.wake_lock);
	wake_lock_destroy(&msm_uport->dma_wake_lock);
	destroy_workqueue(msm_uport->hsuart_wq);
	mutex_destroy(&msm_uport->clk_mutex);

	uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
	clk_put(msm_uport->clk);
	if (msm_uport->pclk)
		clk_put(msm_uport->pclk);

	/* Free the tx resources */
	kfree(msm_uport->tx.command_ptr);
	kfree(msm_uport->tx.command_ptr_ptr);

	/* Free the rx resources */
	kfree(msm_uport->rx.command_ptr);
	kfree(msm_uport->rx.command_ptr_ptr);

	iounmap(msm_uport->uport.membase);

	return 0;
}

static int msm_hs_init_clk(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Set up the MREG/NREG/DREG/MNDREG */
	ret = clk_set_rate(msm_uport->clk, uport->uartclk);
	if (ret) {
		printk(KERN_WARNING "Error setting clock rate on UART\n");
		return ret;
	}

	ret = clk_prepare_enable(msm_uport->clk);
	if (ret) {
		printk(KERN_ERR "Error could not turn on UART clk\n");
		return ret;
	}
	if (msm_uport->pclk) {
		ret = clk_prepare_enable(msm_uport->pclk);
		if (ret) {
			clk_disable_unprepare(msm_uport->clk);
			dev_err(uport->dev,
				"Error could not turn on UART pclk\n");
			return ret;
		}
	}

	msm_uport->clk_state = MSM_HS_CLK_ON;
	return 0;
}


/* Connect a UART peripheral's SPS endpoint (consumer endpoint)
 *
 * Also registers an SPS callback function for the consumer
 * process with the SPS driver
 *
 * @uport - Pointer to uart uport structure
 *
 * @return - 0 if successful else negative value.
 *
 */

static int msm_hs_spsconnect_tx(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
	struct sps_connect *sps_config = &tx->cons.config;
	struct sps_register_event *sps_event = &tx->cons.event;

	/* Establish connection between peripheral and memory endpoint */
	ret = sps_connect(sps_pipe_handle, sps_config);
	if (ret) {
		pr_err("msm_serial_hs: sps_connect() failed for tx!!\n"
			"pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		return ret;
	}
	/* Register callback event for EOT (End of transfer) event. */
	ret = sps_register_event(sps_pipe_handle, sps_event);
	if (ret) {
		pr_err("msm_serial_hs: sps_register_event() failed for tx!!\n"
			"pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		goto reg_event_err;
	}
	return 0;

reg_event_err:
	sps_disconnect(sps_pipe_handle);
	return ret;
}

/* Connect a UART peripheral's SPS endpoint (producer endpoint)
 *
 * Also registers an SPS callback function for the producer
 * process with the SPS driver
 *
 * @uport - Pointer to uart uport structure
 *
 * @return - 0 if successful else negative value.
 *
 */

static int msm_hs_spsconnect_rx(struct uart_port *uport)
{
	int ret;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
	struct sps_connect *sps_config = &rx->prod.config;
	struct sps_register_event *sps_event = &rx->prod.event;

	/* Establish connection between peripheral and memory endpoint */
	ret = sps_connect(sps_pipe_handle, sps_config);
	if (ret) {
		pr_err("msm_serial_hs: sps_connect() failed for rx!!\n"
			"pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		return ret;
	}
	/* Register callback event for EOT (End of transfer) event. */
	ret = sps_register_event(sps_pipe_handle, sps_event);
	if (ret) {
		pr_err("msm_serial_hs: sps_register_event() failed for rx!!\n"
			"pipe_handle=0x%x ret=%d", (u32)sps_pipe_handle, ret);
		goto reg_event_err;
	}
	return 0;

reg_event_err:
	sps_disconnect(sps_pipe_handle);
	return ret;
}
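
/*
 * Note: the TX (consumer) and RX (producer) connect paths above mirror each
 * other; in both cases an already-connected pipe must be torn down with
 * sps_disconnect() before it can be reconfigured, which is what
 * msm_hs_set_termios() below does for the RX pipe before calling
 * msm_hs_spsconnect_rx() again.
 */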
647
Mayank Rana55046232011-03-07 10:28:42 +0530648/*
649 * programs the UARTDM_CSR register with correct bit rates
650 *
651 * Interrupts should be disabled before we are called, as
652 * we modify Set Baud rate
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700653 * Set receive stale interrupt level, dependant on Bit Rate
Mayank Rana55046232011-03-07 10:28:42 +0530654 * Goal is to have around 8 ms before indicate stale.
655 * roundup (((Bit Rate * .008) / 10) + 1
656 */
Saket Saurabhcbf6c522013-01-07 16:30:37 +0530657static void msm_hs_set_bps_locked(struct uart_port *uport,
658 unsigned int bps)
Mayank Rana55046232011-03-07 10:28:42 +0530659{
660 unsigned long rxstale;
661 unsigned long data;
662 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
663
664 switch (bps) {
665 case 300:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700666 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x00);
Mayank Rana55046232011-03-07 10:28:42 +0530667 rxstale = 1;
668 break;
669 case 600:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700670 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x11);
Mayank Rana55046232011-03-07 10:28:42 +0530671 rxstale = 1;
672 break;
673 case 1200:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700674 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x22);
Mayank Rana55046232011-03-07 10:28:42 +0530675 rxstale = 1;
676 break;
677 case 2400:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700678 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x33);
Mayank Rana55046232011-03-07 10:28:42 +0530679 rxstale = 1;
680 break;
681 case 4800:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700682 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x44);
Mayank Rana55046232011-03-07 10:28:42 +0530683 rxstale = 1;
684 break;
685 case 9600:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700686 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x55);
Mayank Rana55046232011-03-07 10:28:42 +0530687 rxstale = 2;
688 break;
689 case 14400:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700690 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x66);
Mayank Rana55046232011-03-07 10:28:42 +0530691 rxstale = 3;
692 break;
693 case 19200:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700694 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x77);
Mayank Rana55046232011-03-07 10:28:42 +0530695 rxstale = 4;
696 break;
697 case 28800:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700698 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x88);
Mayank Rana55046232011-03-07 10:28:42 +0530699 rxstale = 6;
700 break;
701 case 38400:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700702 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
Mayank Rana55046232011-03-07 10:28:42 +0530703 rxstale = 8;
704 break;
705 case 57600:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700706 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
Mayank Rana55046232011-03-07 10:28:42 +0530707 rxstale = 16;
708 break;
709 case 76800:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700710 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
Mayank Rana55046232011-03-07 10:28:42 +0530711 rxstale = 16;
712 break;
713 case 115200:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700714 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
Mayank Rana55046232011-03-07 10:28:42 +0530715 rxstale = 31;
716 break;
717 case 230400:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700718 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
Mayank Rana55046232011-03-07 10:28:42 +0530719 rxstale = 31;
720 break;
721 case 460800:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700722 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
Mayank Rana55046232011-03-07 10:28:42 +0530723 rxstale = 31;
724 break;
725 case 4000000:
726 case 3686400:
727 case 3200000:
728 case 3500000:
729 case 3000000:
730 case 2500000:
731 case 1500000:
732 case 1152000:
733 case 1000000:
734 case 921600:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700735 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
Mayank Rana55046232011-03-07 10:28:42 +0530736 rxstale = 31;
737 break;
738 default:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700739 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
Mayank Rana55046232011-03-07 10:28:42 +0530740 /* default to 9600 */
741 bps = 9600;
742 rxstale = 2;
743 break;
744 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700745 /*
746 * uart baud rate depends on CSR and MND Values
747 * we are updating CSR before and then calling
748 * clk_set_rate which updates MND Values. Hence
749 * dsb requires here.
750 */
751 mb();
752 if (bps > 460800) {
Mayank Rana55046232011-03-07 10:28:42 +0530753 uport->uartclk = bps * 16;
Saket Saurabh676247c2013-01-17 15:19:08 +0530754 if (is_blsp_uart(msm_uport)) {
755 /* BLSP based UART supports maximum clock frequency
756 * of 63.16 Mhz. With this (63.16 Mhz) clock frequency
757 * UART can support baud rate of 3.94 Mbps which is
758 * equivalent to 4 Mbps.
759 * UART hardware is robust enough to handle this
760 * deviation to achieve baud rate ~4 Mbps.
761 */
762 if (bps == 4000000)
763 uport->uartclk = BLSP_UART_CLK_FMAX;
764 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700765 } else {
766 uport->uartclk = 7372800;
767 }
Mayank Ranae6725162012-08-22 17:44:25 +0530768
Mayank Rana55046232011-03-07 10:28:42 +0530769 if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
770 printk(KERN_WARNING "Error setting clock rate on UART\n");
Mayank Ranae6725162012-08-22 17:44:25 +0530771 WARN_ON(1);
Mayank Rana55046232011-03-07 10:28:42 +0530772 }
773
774 data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
775 data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
776
777 msm_hs_write(uport, UARTDM_IPR_ADDR, data);
Mayank Rana2d4d2f62011-07-21 17:31:31 +0530778 /*
779 * It is suggested to do reset of transmitter and receiver after
780 * changing any protocol configuration. Here Baud rate and stale
781 * timeout are getting updated. Hence reset transmitter and receiver.
782 */
783 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
784 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
Mayank Rana55046232011-03-07 10:28:42 +0530785}
786
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700787
788static void msm_hs_set_std_bps_locked(struct uart_port *uport,
789 unsigned int bps)
790{
791 unsigned long rxstale;
792 unsigned long data;
793
794 switch (bps) {
795 case 9600:
796 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
797 rxstale = 2;
798 break;
799 case 14400:
800 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xaa);
801 rxstale = 3;
802 break;
803 case 19200:
804 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xbb);
805 rxstale = 4;
806 break;
807 case 28800:
808 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xcc);
809 rxstale = 6;
810 break;
811 case 38400:
812 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xdd);
813 rxstale = 8;
814 break;
815 case 57600:
816 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xee);
817 rxstale = 16;
818 break;
819 case 115200:
820 msm_hs_write(uport, UARTDM_CSR_ADDR, 0xff);
821 rxstale = 31;
822 break;
823 default:
824 msm_hs_write(uport, UARTDM_CSR_ADDR, 0x99);
825 /* default to 9600 */
826 bps = 9600;
827 rxstale = 2;
828 break;
829 }
830
831 data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
832 data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
833
834 msm_hs_write(uport, UARTDM_IPR_ADDR, data);
Mayank Rana55046232011-03-07 10:28:42 +0530835}

/*
 * termios : new ktermios
 * oldtermios: old ktermios (previous settings)
 *
 * Configure the serial port
 */
static void msm_hs_set_termios(struct uart_port *uport,
			       struct ktermios *termios,
			       struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	int ret;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	mutex_lock(&msm_uport->clk_mutex);
	msm_hs_write(uport, UARTDM_IMR_ADDR, 0);

	/*
	 * Disable the Rx channel of UARTDM.
	 * A DMA Rx stall happens if enqueue and flush of an Rx command happen
	 * concurrently. Hence, before changing the baud rate/protocol
	 * configuration and sending the flush command to the ADM, disable the
	 * Rx channel of UARTDM.
	 * Note: the receiver should not be reset here immediately as it is not
	 * suggested to do disable/reset or reset/disable at the same time.
	 */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport)) {
		/* Disable UARTDM RX BAM Interface */
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	} else {
		data &= ~UARTDM_RX_DM_EN_BMSK;
	}

	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* 300 is the minimum baud rate supported by the driver */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);

	/* Temporary remapping of 200 BAUD to 3.2 Mbps */
	if (bps == 200)
		bps = 3200000;

	uport->uartclk = clk_get_rate(msm_uport->clk);
	if (!uport->uartclk)
		msm_hs_set_std_bps_locked(uport, bps);
	else
		msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UARTDM_MR2_ADDR);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (PARENB == (c_cflag & PARENB)) {
		if (PARODD == (c_cflag & PARODD)) {
			data |= ODD_PARITY;
		} else if (CMSPAR == (c_cflag & CMSPAR)) {
			data |= SPACE_PARITY;
		} else {
			data |= EVEN_PARITY;
		}
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;

	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UARTDM_MR2_ADDR, data);

	/* Configure HW flow control */
	data = msm_hs_read(uport, UARTDM_MR1_ADDR);

	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);

	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
	}

	msm_hs_write(uport, UARTDM_MR1_ADDR, data);

	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;

	uport->read_status_mask = (termios->c_cflag & CREAD);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);

	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		msm_uport->rx.flush = FLUSH_IGNORE;
		/*
		 * Before using dmov APIs make sure that the
		 * previous writel calls have completed. Hence
		 * a dsb (via mb()) is required here.
		 */
		mb();
		if (is_blsp_uart(msm_uport)) {
			sps_disconnect(sps_pipe_handle);
			msm_hs_spsconnect_rx(uport);
			msm_serial_hs_rx_tlet((unsigned long) &rx->tlet);
		} else {
			msm_uport->rx_discard_flush_issued = true;
			/* do discard flush */
			msm_dmov_flush(msm_uport->dma_rx_channel, 0);
			pr_debug("%s(): waiting for flush completion.\n",
								__func__);
			ret = wait_event_timeout(msm_uport->rx.wait,
				msm_uport->rx_discard_flush_issued == false,
				RX_FLUSH_COMPLETE_TIMEOUT);
			if (!ret)
				pr_err("%s(): Discard flush pending.\n",
								__func__);
		}
	}

	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	mb();
	mutex_unlock(&msm_uport->clk_mutex);
}

/*
 * Standard API, Transmitter
 * Any character in the transmit shift register is sent
 */
unsigned int msm_hs_tx_empty(struct uart_port *uport)
{
	unsigned int data;
	unsigned int ret = 0;

	data = msm_hs_read(uport, UARTDM_SR_ADDR);
	if (data & UARTDM_SR_TXEMT_BMSK)
		ret = TIOCSER_TEMT;

	return ret;
}
EXPORT_SYMBOL(msm_hs_tx_empty);

/*
 * Standard API, Stop transmitter.
 * Any character in the transmit shift register is sent as
 * well as the current data mover transfer.
 */
static void msm_hs_stop_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	msm_uport->tx.tx_ready_int_en = 0;
}

/* Disconnect the BAM RX endpoint pipe from workqueue context */
static void hsuart_disconnect_rx_endpoint_work(struct work_struct *w)
{
	struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
						disconnect_rx_endpoint);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;

	sps_disconnect(sps_pipe_handle);
	wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
	msm_uport->rx.flush = FLUSH_SHUTDOWN;
	wake_up(&msm_uport->rx.wait);
}

/*
 * Standard API, Stop receiver as soon as possible.
 *
 * Function immediately terminates the operation of the
 * channel receiver and any incoming characters are lost. None
 * of the receiver status bits are affected by this command and
 * characters that are already in the receive FIFO remain there.
 */
static void msm_hs_stop_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	unsigned int data;

	/* disable dlink */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport))
		data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	else
		data &= ~UARTDM_RX_DM_EN_BMSK;
	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);

	/* calling DMOV or CLOCK API. Hence mb() */
	mb();
	/* Disable the receiver */
	if (msm_uport->rx.flush == FLUSH_NONE) {
		wake_lock(&msm_uport->rx.wake_lock);
		if (is_blsp_uart(msm_uport)) {
			msm_uport->rx.flush = FLUSH_STOP;
			/* workqueue for BAM rx endpoint disconnect */
			queue_work(msm_uport->hsuart_wq,
				   &msm_uport->disconnect_rx_endpoint);
		} else {
			/* do discard flush */
			msm_dmov_flush(msm_uport->dma_rx_channel, 0);
		}
	}
	if (!is_blsp_uart(msm_uport) && msm_uport->rx.flush != FLUSH_SHUTDOWN)
		msm_uport->rx.flush = FLUSH_STOP;
}

/* Transmit the next chunk of data */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	int aligned_tx_count;
	dma_addr_t src_addr;
	dma_addr_t aligned_src_addr;
	u32 flags = SPS_IOVEC_FLAG_EOT;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
	struct sps_pipe *sps_pipe_handle;

	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		msm_hs_stop_tx_locked(uport);
		return;
	}

	tx->dma_in_flight = 1;

	tx_count = uart_circ_chars_pending(tx_buf);

	if (UARTDM_TX_BUF_SIZE < tx_count)
		tx_count = UARTDM_TX_BUF_SIZE;

	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	/* Mask src_addr to align it on a cache line boundary
	 * and add the masked-off bytes to tx_count */
	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
	aligned_tx_count = tx_count + src_addr - aligned_src_addr;

	dma_sync_single_for_device(uport->dev, aligned_src_addr,
				   aligned_tx_count, DMA_TO_DEVICE);

	if (is_blsp_uart(msm_uport)) {
		/* Issue TX BAM Start IFC command */
		msm_hs_write(uport, UARTDM_CR_ADDR, START_TX_BAM_IFC);
	} else {
		tx->command_ptr->num_rows =
				(((tx_count + 15) >> 4) << 16) |
				((tx_count + 15) >> 4);
		tx->command_ptr->src_row_addr = src_addr;

		dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr,
					   sizeof(dmov_box), DMA_TO_DEVICE);

		*tx->command_ptr_ptr = CMD_PTR_LP |
				DMOV_CMD_ADDR(tx->mapped_cmd_ptr);
	}

	/* Save tx_count to use in Callback */
	tx->tx_count = tx_count;
	msm_hs_write(uport, UARTDM_NCF_TX_ADDR, tx_count);

	/* Disable the tx_ready interrupt */
	msm_uport->imr_reg &= ~UARTDM_ISR_TX_READY_BMSK;
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	msm_uport->tx.flush = FLUSH_NONE;

	if (is_blsp_uart(msm_uport)) {
		sps_pipe_handle = tx->cons.pipe_handle;
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
				 msm_uport, flags);
	} else {
		dma_sync_single_for_device(uport->dev, tx->mapped_cmd_ptr_ptr,
					   sizeof(u32), DMA_TO_DEVICE);

		msm_dmov_enqueue_cmd(msm_uport->dma_tx_channel, &tx->xfer);
	}
}

/* Start to receive the next chunk of data */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle;
	u32 flags = SPS_IOVEC_FLAG_EOT;
	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
	unsigned int data;

	msm_uport->rx.buffer_pending = 0;
	if (buffer_pending && hs_serial_debug_mask)
		printk(KERN_ERR "Error: rx started in buffer state = %x",
		       buffer_pending);

	msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
	msm_hs_write(uport, UARTDM_DMRX_ADDR, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_ENABLE);
	msm_uport->imr_reg |= UARTDM_ISR_RXLEV_BMSK;

	/*
	 * Enable the UARTDM Rx Interface as it has previously been
	 * disabled in set_termios before configuring the baud rate.
	 */
	data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
	if (is_blsp_uart(msm_uport)) {
		/* Enable UARTDM Rx BAM Interface */
		data |= UARTDM_RX_BAM_ENABLE_BMSK;
	} else {
		data |= UARTDM_RX_DM_EN_BMSK;
	}

	msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
	msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	if (is_blsp_uart(msm_uport)) {
		/*
		 * The RX transfer will be automatically re-activated
		 * after the last data of the previous transfer was read.
		 */
		data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
			RX_DMRX_CYCLIC_EN);
		msm_hs_write(uport, UARTDM_RX_TRANS_CTRL_ADDR, data);
		/* Issue RX BAM Start IFC command */
		msm_hs_write(uport, UARTDM_CR_ADDR, START_RX_BAM_IFC);
		mb();
	}

	msm_uport->rx.flush = FLUSH_NONE;

	if (is_blsp_uart(msm_uport)) {
		sps_pipe_handle = rx->prod.pipe_handle;
		/* Queue transfer request to SPS */
		sps_transfer_one(sps_pipe_handle, rx->rbuffer,
				 UARTDM_RX_BUF_SIZE, msm_uport, flags);
	} else {
		msm_dmov_enqueue_cmd(msm_uport->dma_rx_channel,
				     &msm_uport->rx.xfer);
	}
}
1217
1218static void flip_insert_work(struct work_struct *work)
1219{
1220 unsigned long flags;
1221 int retval;
1222 struct msm_hs_port *msm_uport =
1223 container_of(work, struct msm_hs_port,
1224 rx.flip_insert_work.work);
1225 struct tty_struct *tty = msm_uport->uport.state->port.tty;
1226
1227 spin_lock_irqsave(&msm_uport->uport.lock, flags);
1228 if (msm_uport->rx.buffer_pending == NONE_PENDING) {
1229 if (hs_serial_debug_mask)
1230 printk(KERN_ERR "Error: No buffer pending in %s",
1231 __func__);
1232 return;
1233 }
1234 if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
1235 retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1236 if (retval)
1237 msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
1238 }
1239 if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
1240 retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
1241 if (retval)
1242 msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
1243 }
1244 if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
1245 int rx_count, rx_offset;
1246 rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
1247 rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
1248 retval = tty_insert_flip_string(tty, msm_uport->rx.buffer +
1249 rx_offset, rx_count);
1250 msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
1251 PARITY_ERROR);
1252 if (retval != rx_count)
1253 msm_uport->rx.buffer_pending |= CHARS_NORMAL |
1254 retval << 8 | (rx_count - retval) << 16;
1255 }
1256 if (msm_uport->rx.buffer_pending)
1257 schedule_delayed_work(&msm_uport->rx.flip_insert_work,
1258 msecs_to_jiffies(RETRY_TIMEOUT));
1259 else
1260 if ((msm_uport->clk_state == MSM_HS_CLK_ON) &&
1261 (msm_uport->rx.flush <= FLUSH_IGNORE)) {
1262 if (hs_serial_debug_mask)
1263 printk(KERN_WARNING
1264 "msm_serial_hs: "
1265 "Pending buffers cleared. "
1266 "Restarting\n");
1267 msm_hs_start_rx_locked(&msm_uport->uport);
1268 }
1269 spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
1270 tty_flip_buffer_push(tty);
1271}
1272
1273static void msm_serial_hs_rx_tlet(unsigned long tlet_ptr)
1274{
1275 int retval;
1276 int rx_count;
1277 unsigned long status;
1278 unsigned long flags;
1279 unsigned int error_f = 0;
1280 struct uart_port *uport;
1281 struct msm_hs_port *msm_uport;
1282 unsigned int flush;
1283 struct tty_struct *tty;
Mayank Rana05396b22013-03-16 19:10:11 +05301284 struct sps_event_notify *notify;
1285 struct msm_hs_rx *rx;
1286 struct sps_pipe *sps_pipe_handle;
1287 u32 sps_flags = SPS_IOVEC_FLAG_EOT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001288
1289 msm_uport = container_of((struct tasklet_struct *)tlet_ptr,
1290 struct msm_hs_port, rx.tlet);
1291 uport = &msm_uport->uport;
1292 tty = uport->state->port.tty;
Mayank Rana05396b22013-03-16 19:10:11 +05301293 notify = &msm_uport->notify;
1294 rx = &msm_uport->rx;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001295
1296 status = msm_hs_read(uport, UARTDM_SR_ADDR);
1297
1298 spin_lock_irqsave(&uport->lock, flags);
1299
Mayank Rana05396b22013-03-16 19:10:11 +05301300 if (!is_blsp_uart(msm_uport))
1301 msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001302
1303 /* overflow is not connect to data in a FIFO */
1304 if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
1305 (uport->read_status_mask & CREAD))) {
1306 retval = tty_insert_flip_char(tty, 0, TTY_OVERRUN);
1307 if (!retval)
1308 msm_uport->rx.buffer_pending |= TTY_OVERRUN;
1309 uport->icount.buf_overrun++;
1310 error_f = 1;
1311 }
1312
1313 if (!(uport->ignore_status_mask & INPCK))
1314 status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);
1315
1316 if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
1317 /* Can not tell difference between parity & frame error */
Mayank Rana85aeee12012-11-27 14:49:46 +05301318 if (hs_serial_debug_mask)
1319 printk(KERN_WARNING "msm_serial_hs: parity error\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001320 uport->icount.parity++;
1321 error_f = 1;
Mayank Rana85aeee12012-11-27 14:49:46 +05301322 if (!(uport->ignore_status_mask & IGNPAR)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001323 retval = tty_insert_flip_char(tty, 0, TTY_PARITY);
1324 if (!retval)
1325 msm_uport->rx.buffer_pending |= TTY_PARITY;
1326 }
1327 }
1328
Mayank Rana85aeee12012-11-27 14:49:46 +05301329 if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
1330 if (hs_serial_debug_mask)
1331 printk(KERN_WARNING "msm_serial_hs: Rx break\n");
1332 uport->icount.brk++;
1333 error_f = 1;
1334 if (!(uport->ignore_status_mask & IGNBRK)) {
1335 retval = tty_insert_flip_char(tty, 0, TTY_BREAK);
1336 if (!retval)
1337 msm_uport->rx.buffer_pending |= TTY_BREAK;
1338 }
1339 }
1340
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001341 if (error_f)
1342 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
1343
1344 if (msm_uport->clk_req_off_state == CLK_REQ_OFF_FLUSH_ISSUED)
1345 msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_FLUSHED;
1346 flush = msm_uport->rx.flush;
1347 if (flush == FLUSH_IGNORE)
1348 if (!msm_uport->rx.buffer_pending)
1349 msm_hs_start_rx_locked(uport);
1350
1351 if (flush == FLUSH_STOP) {
1352 msm_uport->rx.flush = FLUSH_SHUTDOWN;
1353 wake_up(&msm_uport->rx.wait);
1354 }
1355 if (flush >= FLUSH_DATA_INVALID)
1356 goto out;
1357
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301358 if (is_blsp_uart(msm_uport)) {
Mayank Rana05396b22013-03-16 19:10:11 +05301359 rx_count = msm_uport->rx_count_callback;
1360 } else {
1361 rx_count = msm_hs_read(uport, UARTDM_RX_TOTAL_SNAP_ADDR);
1362 /* order the read of rx.buffer */
1363 rmb();
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301364 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001365
1366 if (0 != (uport->read_status_mask & CREAD)) {
1367 retval = tty_insert_flip_string(tty, msm_uport->rx.buffer,
1368 rx_count);
1369 if (retval != rx_count) {
1370 msm_uport->rx.buffer_pending |= CHARS_NORMAL |
1371 retval << 5 | (rx_count - retval) << 16;
1372 }
1373 }
1374
1375 /* order the read of rx.buffer and the start of next rx xfer */
1376 wmb();
1377
Mayank Rana05396b22013-03-16 19:10:11 +05301378 if (!msm_uport->rx.buffer_pending) {
1379 if (is_blsp_uart(msm_uport)) {
1380 msm_uport->rx.flush = FLUSH_NONE;
1381 sps_pipe_handle = rx->prod.pipe_handle;
1382 /* Queue transfer request to SPS */
1383 sps_transfer_one(sps_pipe_handle, rx->rbuffer,
1384 UARTDM_RX_BUF_SIZE, msm_uport, sps_flags);
1385 } else {
1386 msm_hs_start_rx_locked(uport);
1387 }
1388 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001389out:
1390 if (msm_uport->rx.buffer_pending) {
1391 if (hs_serial_debug_mask)
1392 printk(KERN_WARNING
1393 "msm_serial_hs: "
1394 "tty buffer exhausted. "
1395 "Stalling\n");
1396 schedule_delayed_work(&msm_uport->rx.flip_insert_work
1397 , msecs_to_jiffies(RETRY_TIMEOUT));
1398 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001399 /* release wakelock in 500ms, not immediately, because higher layers
1400 * don't always take wakelocks when they should */
1401 wake_lock_timeout(&msm_uport->rx.wake_lock, HZ / 2);
1402 /* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
1403 spin_unlock_irqrestore(&uport->lock, flags);
1404 if (flush < FLUSH_DATA_INVALID)
1405 tty_flip_buffer_push(tty);
Mayank Rana55046232011-03-07 10:28:42 +05301406}
1407
1408/* Enable the transmitter Interrupt */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001409static void msm_hs_start_tx_locked(struct uart_port *uport )
Mayank Rana55046232011-03-07 10:28:42 +05301410{
1411 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1412
Mayank Rana55046232011-03-07 10:28:42 +05301413 if (msm_uport->tx.tx_ready_int_en == 0) {
1414 msm_uport->tx.tx_ready_int_en = 1;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001415 if (msm_uport->tx.dma_in_flight == 0)
1416 msm_hs_submit_tx_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301417 }
Mayank Rana55046232011-03-07 10:28:42 +05301418}
1419
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301420/**
1421 * Callback notification from SPS driver
1422 *
1423 * This callback function gets called from the SPS driver
1424 * when the requested SPS data transfer is
1425 * completed.
1426 *
1427 */
1428
1429static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
1430{
1431 struct msm_hs_port *msm_uport =
1432 (struct msm_hs_port *)
1433 ((struct sps_event_notify *)notify)->user;
1434
1435 msm_uport->notify = *notify;
1436 pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
1437 __func__, notify->event_id,
1438 notify->data.transfer.iovec.addr,
1439 notify->data.transfer.iovec.size,
1440 notify->data.transfer.iovec.flags);
1441
1442 tasklet_schedule(&msm_uport->tx.tlet);
1443}
1444
Mayank Rana55046232011-03-07 10:28:42 +05301445/*
1446 * This routine is called when we are done with a DMA transfer
1447 *
1448 * This routine is registered with Data mover when we set
1449 * up a Data Mover transfer. It is called from Data mover ISR
1450 * when the DMA transfer is done.
1451 */
1452static void msm_hs_dmov_tx_callback(struct msm_dmov_cmd *cmd_ptr,
1453 unsigned int result,
1454 struct msm_dmov_errdata *err)
1455{
Mayank Rana55046232011-03-07 10:28:42 +05301456 struct msm_hs_port *msm_uport;
1457
Mayank Rana55046232011-03-07 10:28:42 +05301458 msm_uport = container_of(cmd_ptr, struct msm_hs_port, tx.xfer);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05301459 if (msm_uport->tx.flush == FLUSH_STOP)
1460		/* DMA FLUSH unsuccessful */
1461 WARN_ON(!(result & DMOV_RSLT_FLUSH));
1462 else
1463 /* DMA did not finish properly */
1464 WARN_ON(!(result & DMOV_RSLT_DONE));
Mayank Rana55046232011-03-07 10:28:42 +05301465
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001466 tasklet_schedule(&msm_uport->tx.tlet);
1467}
1468
1469static void msm_serial_hs_tx_tlet(unsigned long tlet_ptr)
1470{
1471 unsigned long flags;
1472 struct msm_hs_port *msm_uport = container_of((struct tasklet_struct *)
1473 tlet_ptr, struct msm_hs_port, tx.tlet);
1474
1475 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
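	/*
	 * A TX flush was requested (shutdown path): mark the TX side as shut
	 * down and wake up the waiter instead of re-arming the TX_READY
	 * interrupt.
	 */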
Mayank Ranaaf2f0082012-05-22 10:16:02 +05301476 if (msm_uport->tx.flush == FLUSH_STOP) {
1477 msm_uport->tx.flush = FLUSH_SHUTDOWN;
1478 wake_up(&msm_uport->tx.wait);
1479 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
1480 return;
1481 }
Mayank Rana55046232011-03-07 10:28:42 +05301482
1483 msm_uport->imr_reg |= UARTDM_ISR_TX_READY_BMSK;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001484 msm_hs_write(&(msm_uport->uport), UARTDM_IMR_ADDR, msm_uport->imr_reg);
1485	/* Calling clk API. Hence mb() is required. */
1486 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301487
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001488 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
Mayank Rana55046232011-03-07 10:28:42 +05301489}
1490
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301491/**
1492 * Callback notification from SPS driver
1493 *
1494 * This callback function gets called from the SPS driver
1495 * when the requested SPS data transfer is
1496 * completed.
1497 *
1498 */
1499
1500static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
1501{
1502
1503 struct msm_hs_port *msm_uport =
1504 (struct msm_hs_port *)
1505 ((struct sps_event_notify *)notify)->user;
Mayank Rana05396b22013-03-16 19:10:11 +05301506 struct uart_port *uport;
1507 unsigned long flags;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301508
Mayank Rana05396b22013-03-16 19:10:11 +05301509 uport = &(msm_uport->uport);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301510 msm_uport->notify = *notify;
1511 pr_debug("%s: sps ev_id=%d, addr=0x%x, size=0x%x, flags=0x%x\n",
1512 __func__, notify->event_id,
1513 notify->data.transfer.iovec.addr,
1514 notify->data.transfer.iovec.size,
1515 notify->data.transfer.iovec.flags);
1516
Mayank Rana05396b22013-03-16 19:10:11 +05301517 if (msm_uport->rx.flush == FLUSH_NONE) {
1518 spin_lock_irqsave(&uport->lock, flags);
1519 msm_uport->rx_count_callback = notify->data.transfer.iovec.size;
1520 spin_unlock_irqrestore(&uport->lock, flags);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301521 tasklet_schedule(&msm_uport->rx.tlet);
Mayank Rana05396b22013-03-16 19:10:11 +05301522 }
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301523}
1524
Mayank Rana55046232011-03-07 10:28:42 +05301525/*
1526 * This routine is called when we are done with a DMA transfer or when
1527 * a flush has been sent to the data mover driver.
1528 *
1529 * This routine is registered with Data mover when we set up a Data Mover
1530 * transfer. It is called from Data mover ISR when the DMA transfer is done.
1531 */
1532static void msm_hs_dmov_rx_callback(struct msm_dmov_cmd *cmd_ptr,
1533 unsigned int result,
1534 struct msm_dmov_errdata *err)
1535{
Mayank Rana55046232011-03-07 10:28:42 +05301536 struct msm_hs_port *msm_uport;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301537 struct uart_port *uport;
1538 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301539
1540 msm_uport = container_of(cmd_ptr, struct msm_hs_port, rx.xfer);
Mayank Rana9c8bda92013-02-28 11:58:04 +05301541 uport = &(msm_uport->uport);
1542
1543 pr_debug("%s(): called result:%x\n", __func__, result);
1544 if (!(result & DMOV_RSLT_ERROR)) {
1545 if (result & DMOV_RSLT_FLUSH) {
1546 if (msm_uport->rx_discard_flush_issued) {
1547 spin_lock_irqsave(&uport->lock, flags);
1548 msm_uport->rx_discard_flush_issued = false;
1549 spin_unlock_irqrestore(&uport->lock, flags);
1550 wake_up(&msm_uport->rx.wait);
1551 }
1552 }
1553 }
Mayank Rana55046232011-03-07 10:28:42 +05301554
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001555 tasklet_schedule(&msm_uport->rx.tlet);
Mayank Rana55046232011-03-07 10:28:42 +05301556}
1557
1558/*
1559 * Standard API, Current states of modem control inputs
1560 *
1561 * Since CTS can be handled entirely by HARDWARE we always
1562 * indicate clear to send and count on the TX FIFO to block when
1563 * it fills up.
1564 *
1565 * - TIOCM_DCD
1566 * - TIOCM_CTS
1567 * - TIOCM_DSR
1568 * - TIOCM_RI
1569 * DCD, DSR and RI are unsupported: DCD and DSR always read high, RI reads low.
1570 */
1571static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
1572{
1573 return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
1574}
1575
1576/*
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001577 * Standard API, Set or clear RFR signal
1578 *
1579 * To set RFR high (indicate we are not ready for data), disable auto
1580 * ready-for-receiving and then drive RFR_N high. To set RFR low, simply
1581 * re-enable auto ready-for-receiving and the hardware will lower the RFR
1582 * signal when it is ready
Mayank Rana55046232011-03-07 10:28:42 +05301583 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001584void msm_hs_set_mctrl_locked(struct uart_port *uport,
Mayank Rana55046232011-03-07 10:28:42 +05301585 unsigned int mctrl)
1586{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001587 unsigned int set_rts;
1588 unsigned int data;
Mayank Rana55046232011-03-07 10:28:42 +05301589
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001590 /* RTS is active low */
1591 set_rts = TIOCM_RTS & mctrl ? 0 : 1;
Mayank Rana55046232011-03-07 10:28:42 +05301592
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001593 data = msm_hs_read(uport, UARTDM_MR1_ADDR);
1594 if (set_rts) {
1595 /*disable auto ready-for-receiving */
1596 data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
1597 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
1598 /* set RFR_N to high */
1599 msm_hs_write(uport, UARTDM_CR_ADDR, RFR_HIGH);
1600 } else {
1601 /* Enable auto ready-for-receiving */
1602 data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
1603 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
1604 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001605 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301606}
1607
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001608void msm_hs_set_mctrl(struct uart_port *uport,
1609 unsigned int mctrl)
1610{
1611 unsigned long flags;
1612
1613 spin_lock_irqsave(&uport->lock, flags);
1614 msm_hs_set_mctrl_locked(uport, mctrl);
1615 spin_unlock_irqrestore(&uport->lock, flags);
1616}
1617EXPORT_SYMBOL(msm_hs_set_mctrl);
1618
Mayank Rana55046232011-03-07 10:28:42 +05301619/* Standard API, Enable modem status (CTS) interrupt */
1620static void msm_hs_enable_ms_locked(struct uart_port *uport)
1621{
1622 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1623
Mayank Rana55046232011-03-07 10:28:42 +05301624 /* Enable DELTA_CTS Interrupt */
1625 msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
1626 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001627 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301628
1629}
1630
Saket Saurabhce394102012-10-29 19:51:28 +05301631static void msm_hs_flush_buffer(struct uart_port *uport)
1632{
1633 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1634
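	/*
	 * If a TX DMA is still in flight when the serial core flushes the
	 * circular buffer, note it so the TX-ready ISR does not advance
	 * tx_buf->tail past data the core has already discarded.
	 */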
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301635 if (msm_uport->tx.dma_in_flight)
1636 msm_uport->tty_flush_receive = true;
Saket Saurabhce394102012-10-29 19:51:28 +05301637}
1638
Mayank Rana55046232011-03-07 10:28:42 +05301639/*
1640 * Standard API, Break Signal
1641 *
1642 * Control the transmission of a break signal: ctl != 0 starts the
1643 * break signal, ctl == 0 terminates it
1644 */
1645static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
1646{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001647 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301648
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001649 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301650 msm_hs_write(uport, UARTDM_CR_ADDR, ctl ? START_BREAK : STOP_BREAK);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001651 mb();
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001652 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301653}
1654
1655static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
1656{
1657 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001658 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301659
Mayank Rana55046232011-03-07 10:28:42 +05301660 if (cfg_flags & UART_CONFIG_TYPE) {
1661 uport->type = PORT_MSM;
1662 msm_hs_request_port(uport);
1663 }
Mayank Ranabbfd2692011-09-20 08:51:17 +05301664
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001665 if (is_gsbi_uart(msm_uport)) {
Mayank Rana00b6bff2011-08-17 08:33:42 +05301666 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301667 clk_prepare_enable(msm_uport->pclk);
1668 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001669 iowrite32(GSBI_PROTOCOL_UART, msm_uport->mapped_gsbi +
1670 GSBI_CONTROL_ADDR);
Mayank Ranacb589d82012-03-01 11:50:03 +05301671 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana00b6bff2011-08-17 08:33:42 +05301672 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301673 clk_disable_unprepare(msm_uport->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001674 }
Mayank Rana55046232011-03-07 10:28:42 +05301675}
1676
1677/* Handle CTS changes (Called from interrupt handler) */
Mayank Ranaee815f32011-12-08 09:06:09 +05301678static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301679{
Mayank Rana55046232011-03-07 10:28:42 +05301680 /* clear interrupt */
1681 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001682	/* Calling CLOCK API. Hence mb() is required here. */
1683 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301684 uport->icount.cts++;
1685
Mayank Rana55046232011-03-07 10:28:42 +05301686 /* clear the IOCTL TIOCMIWAIT if called */
1687 wake_up_interruptible(&uport->state->port.delta_msr_wait);
1688}
1689
1690/* Check if the TX path is flushed, and if so clock off.
1691 * Returns:  0 - did not clock off, need to retry (still sending final byte)
1692 *          -1 - did not clock off, do not retry
1693 *           1 - clocked off
1694 */
Mayank Ranacb589d82012-03-01 11:50:03 +05301695static int msm_hs_check_clock_off(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301696{
1697 unsigned long sr_status;
Mayank Ranacb589d82012-03-01 11:50:03 +05301698 unsigned long flags;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301699 int ret;
Mayank Rana55046232011-03-07 10:28:42 +05301700 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1701 struct circ_buf *tx_buf = &uport->state->xmit;
Mayank Rana05396b22013-03-16 19:10:11 +05301702 struct msm_hs_rx *rx = &msm_uport->rx;
1703 struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05301704
Mayank Ranacb589d82012-03-01 11:50:03 +05301705 mutex_lock(&msm_uport->clk_mutex);
1706 spin_lock_irqsave(&uport->lock, flags);
1707
Mayank Rana55046232011-03-07 10:28:42 +05301708 /* Cancel if tx tty buffer is not empty, dma is in flight,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001709 * or tx fifo is not empty */
Mayank Rana55046232011-03-07 10:28:42 +05301710 if (msm_uport->clk_state != MSM_HS_CLK_REQUEST_OFF ||
1711 !uart_circ_empty(tx_buf) || msm_uport->tx.dma_in_flight ||
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001712 msm_uport->imr_reg & UARTDM_ISR_TXLEV_BMSK) {
Mayank Ranacb589d82012-03-01 11:50:03 +05301713 spin_unlock_irqrestore(&uport->lock, flags);
1714 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301715 return -1;
1716 }
1717
1718 /* Make sure the uart is finished with the last byte */
1719 sr_status = msm_hs_read(uport, UARTDM_SR_ADDR);
Mayank Ranacb589d82012-03-01 11:50:03 +05301720 if (!(sr_status & UARTDM_SR_TXEMT_BMSK)) {
1721 spin_unlock_irqrestore(&uport->lock, flags);
1722 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301723 return 0; /* retry */
Mayank Ranacb589d82012-03-01 11:50:03 +05301724 }
Mayank Rana55046232011-03-07 10:28:42 +05301725
1726 /* Make sure forced RXSTALE flush complete */
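	/*
	 * Clock-off proceeds in stages tracked by clk_req_off_state: first a
	 * stale event is forced so any bytes sitting in the RX FIFO are
	 * flushed to memory, then we wait until that flush has completed
	 * (CLK_REQ_OFF_RXSTALE_FLUSHED) before the clocks are actually gated
	 * further below.
	 */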
1727 switch (msm_uport->clk_req_off_state) {
1728 case CLK_REQ_OFF_START:
1729 msm_uport->clk_req_off_state = CLK_REQ_OFF_RXSTALE_ISSUED;
Mayank Rana05396b22013-03-16 19:10:11 +05301730
1731 if (!is_blsp_uart(msm_uport)) {
1732 msm_hs_write(uport, UARTDM_CR_ADDR, FORCE_STALE_EVENT);
1733 /*
1734			 * Before returning, make sure that the device writel has
1735			 * completed. Hence mb() is required here.
1736 */
1737 mb();
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301738 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301739 spin_unlock_irqrestore(&uport->lock, flags);
1740 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301741 return 0; /* RXSTALE flush not complete - retry */
1742 case CLK_REQ_OFF_RXSTALE_ISSUED:
1743 case CLK_REQ_OFF_FLUSH_ISSUED:
Mayank Ranacb589d82012-03-01 11:50:03 +05301744 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana05396b22013-03-16 19:10:11 +05301745 if (is_blsp_uart(msm_uport)) {
1746 msm_uport->clk_req_off_state =
1747 CLK_REQ_OFF_RXSTALE_FLUSHED;
1748 sps_disconnect(sps_pipe_handle);
1749 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301750 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301751 return 0; /* RXSTALE flush not complete - retry */
1752 case CLK_REQ_OFF_RXSTALE_FLUSHED:
1753 break; /* continue */
1754 }
1755
1756 if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
Mayank Rana9c8bda92013-02-28 11:58:04 +05301757 if (msm_uport->rx.flush == FLUSH_NONE) {
Mayank Rana55046232011-03-07 10:28:42 +05301758 msm_hs_stop_rx_locked(uport);
Saket Saurabh467614f2013-03-16 17:24:12 +05301759 if (!is_blsp_uart(msm_uport))
1760 msm_uport->rx_discard_flush_issued = true;
Mayank Rana9c8bda92013-02-28 11:58:04 +05301761 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301762
1763 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana9c8bda92013-02-28 11:58:04 +05301764 if (msm_uport->rx_discard_flush_issued) {
1765			pr_debug("%s(): waiting for flush completion.\n",
1766 __func__);
1767 ret = wait_event_timeout(msm_uport->rx.wait,
1768 msm_uport->rx_discard_flush_issued == false,
1769 RX_FLUSH_COMPLETE_TIMEOUT);
1770 if (!ret)
1771 pr_err("%s(): Flush complete pending.\n",
1772 __func__);
1773 }
1774
Mayank Ranacb589d82012-03-01 11:50:03 +05301775 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05301776 return 0; /* come back later to really clock off */
1777 }
1778
Mayank Rana55046232011-03-07 10:28:42 +05301779 spin_unlock_irqrestore(&uport->lock, flags);
1780
Mayank Rana55046232011-03-07 10:28:42 +05301781 /* we really want to clock off */
Mayank Ranacb589d82012-03-01 11:50:03 +05301782 clk_disable_unprepare(msm_uport->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001783 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05301784 clk_disable_unprepare(msm_uport->pclk);
1785
Mayank Rana55046232011-03-07 10:28:42 +05301786 msm_uport->clk_state = MSM_HS_CLK_OFF;
Mayank Ranacb589d82012-03-01 11:50:03 +05301787
1788 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001789 if (use_low_power_wakeup(msm_uport)) {
1790 msm_uport->wakeup.ignore = 1;
1791 enable_irq(msm_uport->wakeup.irq);
Mayank Rana55046232011-03-07 10:28:42 +05301792 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001793 wake_unlock(&msm_uport->dma_wake_lock);
Mayank Ranacb589d82012-03-01 11:50:03 +05301794
1795 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana88d49142013-01-16 17:28:53 +05301796
1797 /* Reset PNOC Bus Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301798 msm_hs_bus_voting(msm_uport, BUS_RESET);
Mayank Ranacb589d82012-03-01 11:50:03 +05301799 mutex_unlock(&msm_uport->clk_mutex);
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301800
Mayank Rana55046232011-03-07 10:28:42 +05301801 return 1;
1802}
1803
Mayank Ranacb589d82012-03-01 11:50:03 +05301804static void hsuart_clock_off_work(struct work_struct *w)
1805{
1806 struct msm_hs_port *msm_uport = container_of(w, struct msm_hs_port,
1807 clock_off_w);
Mayank Rana55046232011-03-07 10:28:42 +05301808 struct uart_port *uport = &msm_uport->uport;
1809
Mayank Ranacb589d82012-03-01 11:50:03 +05301810 if (!msm_hs_check_clock_off(uport)) {
1811 hrtimer_start(&msm_uport->clk_off_timer,
1812 msm_uport->clk_off_delay,
1813 HRTIMER_MODE_REL);
Mayank Rana55046232011-03-07 10:28:42 +05301814 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301815}
Mayank Rana55046232011-03-07 10:28:42 +05301816
Mayank Ranacb589d82012-03-01 11:50:03 +05301817static enum hrtimer_restart msm_hs_clk_off_retry(struct hrtimer *timer)
1818{
1819 struct msm_hs_port *msm_uport = container_of(timer, struct msm_hs_port,
1820 clk_off_timer);
Mayank Rana55046232011-03-07 10:28:42 +05301821
Mayank Ranacb589d82012-03-01 11:50:03 +05301822 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
1823 return HRTIMER_NORESTART;
Mayank Rana55046232011-03-07 10:28:42 +05301824}
1825
1826static irqreturn_t msm_hs_isr(int irq, void *dev)
1827{
1828 unsigned long flags;
1829 unsigned long isr_status;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
Mayank Rana55046232011-03-07 10:28:42 +05301831 struct uart_port *uport = &msm_uport->uport;
1832 struct circ_buf *tx_buf = &uport->state->xmit;
1833 struct msm_hs_tx *tx = &msm_uport->tx;
1834 struct msm_hs_rx *rx = &msm_uport->rx;
1835
1836 spin_lock_irqsave(&uport->lock, flags);
1837
1838 isr_status = msm_hs_read(uport, UARTDM_MISR_ADDR);
1839
1840 /* Uart RX starting */
1841 if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001842 wake_lock(&rx->wake_lock); /* hold wakelock while rx dma */
Mayank Rana55046232011-03-07 10:28:42 +05301843 msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
1844 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001845		/* Complete device write for IMR. Hence mb() is required. */
1846 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301847 }
1848 /* Stale rx interrupt */
1849 if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
1850 msm_hs_write(uport, UARTDM_CR_ADDR, STALE_EVENT_DISABLE);
1851 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001852 /*
1853		 * Complete device write before calling DMOV API. Hence
1854		 * mb() is required here.
1855 */
1856 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301857
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301858 if (msm_uport->clk_req_off_state ==
Mayank Rana05396b22013-03-16 19:10:11 +05301859 CLK_REQ_OFF_RXSTALE_ISSUED)
Mayank Rana55046232011-03-07 10:28:42 +05301860 msm_uport->clk_req_off_state =
Mayank Rana05396b22013-03-16 19:10:11 +05301861 CLK_REQ_OFF_FLUSH_ISSUED;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001862
Mayank Rana05396b22013-03-16 19:10:11 +05301863 if (!is_blsp_uart(msm_uport) && (rx->flush == FLUSH_NONE)) {
Mayank Rana55046232011-03-07 10:28:42 +05301864 rx->flush = FLUSH_DATA_READY;
Mayank Rana05396b22013-03-16 19:10:11 +05301865 msm_dmov_flush(msm_uport->dma_rx_channel, 1);
Mayank Rana55046232011-03-07 10:28:42 +05301866 }
1867 }
1868 /* tx ready interrupt */
1869 if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
1870 /* Clear TX Ready */
1871 msm_hs_write(uport, UARTDM_CR_ADDR, CLEAR_TX_READY);
1872
1873 if (msm_uport->clk_state == MSM_HS_CLK_REQUEST_OFF) {
1874 msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
1875 msm_hs_write(uport, UARTDM_IMR_ADDR,
1876 msm_uport->imr_reg);
1877 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001878 /*
1879 * Complete both writes before starting new TX.
1880		 * Hence mb() is required here.
1881 */
1882 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301883 /* Complete DMA TX transactions and submit new transactions */
Saket Saurabhce394102012-10-29 19:51:28 +05301884
1885 /* Do not update tx_buf.tail if uart_flush_buffer already
1886 called in serial core */
1887 if (!msm_uport->tty_flush_receive)
1888 tx_buf->tail = (tx_buf->tail +
1889 tx->tx_count) & ~UART_XMIT_SIZE;
1890 else
1891 msm_uport->tty_flush_receive = false;
Mayank Rana55046232011-03-07 10:28:42 +05301892
1893 tx->dma_in_flight = 0;
1894
1895 uport->icount.tx += tx->tx_count;
1896 if (tx->tx_ready_int_en)
1897 msm_hs_submit_tx_locked(uport);
1898
1899 if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
1900 uart_write_wakeup(uport);
1901 }
1902 if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
1903 /* TX FIFO is empty */
1904 msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
1905 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001906 /*
1907 * Complete device write before starting clock_off request.
1908		 * Hence mb() is required here.
1909 */
1910 mb();
Mayank Ranacb589d82012-03-01 11:50:03 +05301911 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
Mayank Rana55046232011-03-07 10:28:42 +05301912 }
1913
1914 /* Change in CTS interrupt */
1915 if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
Mayank Ranaee815f32011-12-08 09:06:09 +05301916 msm_hs_handle_delta_cts_locked(uport);
Mayank Rana55046232011-03-07 10:28:42 +05301917
1918 spin_unlock_irqrestore(&uport->lock, flags);
1919
1920 return IRQ_HANDLED;
1921}
1922
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001923/* request to turn off uart clock once pending TX is flushed */
1924void msm_hs_request_clock_off(struct uart_port *uport) {
1925 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301926 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1927
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001928 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301929 if (msm_uport->clk_state == MSM_HS_CLK_ON) {
1930 msm_uport->clk_state = MSM_HS_CLK_REQUEST_OFF;
1931 msm_uport->clk_req_off_state = CLK_REQ_OFF_START;
Mayank Rana55046232011-03-07 10:28:42 +05301932 msm_uport->imr_reg |= UARTDM_ISR_TXLEV_BMSK;
1933 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001934 /*
1935		 * Complete device write before returning.
1936		 * Hence mb() is required here.
1937 */
1938 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301939 }
Mayank Rana55046232011-03-07 10:28:42 +05301940 spin_unlock_irqrestore(&uport->lock, flags);
1941}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001942EXPORT_SYMBOL(msm_hs_request_clock_off);
Mayank Rana55046232011-03-07 10:28:42 +05301943
Mayank Ranacb589d82012-03-01 11:50:03 +05301944void msm_hs_request_clock_on(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05301945{
1946 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Ranacb589d82012-03-01 11:50:03 +05301947 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05301948 unsigned int data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001949 int ret = 0;
Mayank Rana55046232011-03-07 10:28:42 +05301950
Mayank Ranacb589d82012-03-01 11:50:03 +05301951 mutex_lock(&msm_uport->clk_mutex);
1952 spin_lock_irqsave(&uport->lock, flags);
Mayank Rana55046232011-03-07 10:28:42 +05301953
1954 switch (msm_uport->clk_state) {
1955 case MSM_HS_CLK_OFF:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001956 wake_lock(&msm_uport->dma_wake_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001957 disable_irq_nosync(msm_uport->wakeup.irq);
Mayank Ranacb589d82012-03-01 11:50:03 +05301958 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Rana88d49142013-01-16 17:28:53 +05301959
1960 /* Vote for PNOC BUS Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05301961 msm_hs_bus_voting(msm_uport, BUS_SCALING);
Mayank Rana88d49142013-01-16 17:28:53 +05301962
Mayank Ranacb589d82012-03-01 11:50:03 +05301963 ret = clk_prepare_enable(msm_uport->clk);
1964 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001965			dev_err(uport->dev, "Clock ON Failure "
Mayank Ranacb589d82012-03-01 11:50:03 +05301966 "For UART CLK Stalling HSUART\n");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001967 break;
1968 }
Mayank Ranacb589d82012-03-01 11:50:03 +05301969
1970 if (msm_uport->pclk) {
1971 ret = clk_prepare_enable(msm_uport->pclk);
1972 if (unlikely(ret)) {
1973 clk_disable_unprepare(msm_uport->clk);
1974				dev_err(uport->dev, "Clock ON Failure "
1975 "For UART Pclk Stalling HSUART\n");
1976 break;
1977 }
1978 }
1979 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001980 /* else fall-through */
Mayank Rana55046232011-03-07 10:28:42 +05301981 case MSM_HS_CLK_REQUEST_OFF:
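		/*
		 * A clock-off request was pending: if RX was stopped or shut
		 * down in the meantime, reset the receiver and re-enable the
		 * RX BAM/DMA interface before RX is restarted below.
		 */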
1982 if (msm_uport->rx.flush == FLUSH_STOP ||
1983 msm_uport->rx.flush == FLUSH_SHUTDOWN) {
1984 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
1985 data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05301986 if (is_blsp_uart(msm_uport))
1987 data |= UARTDM_RX_BAM_ENABLE_BMSK;
1988 else
1989 data |= UARTDM_RX_DM_EN_BMSK;
Mayank Rana55046232011-03-07 10:28:42 +05301990 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001991			/* Complete the above device write. Hence mb() is required here. */
1992 mb();
Mayank Rana55046232011-03-07 10:28:42 +05301993 }
1994 hrtimer_try_to_cancel(&msm_uport->clk_off_timer);
Mayank Rana05396b22013-03-16 19:10:11 +05301995 if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
1996 if (is_blsp_uart(msm_uport)) {
1997 spin_unlock_irqrestore(&uport->lock, flags);
1998 msm_hs_spsconnect_rx(uport);
1999 spin_lock_irqsave(&uport->lock, flags);
2000 }
Mayank Rana55046232011-03-07 10:28:42 +05302001 msm_hs_start_rx_locked(uport);
Mayank Rana05396b22013-03-16 19:10:11 +05302002 }
Mayank Rana55046232011-03-07 10:28:42 +05302003 if (msm_uport->rx.flush == FLUSH_STOP)
2004 msm_uport->rx.flush = FLUSH_IGNORE;
2005 msm_uport->clk_state = MSM_HS_CLK_ON;
2006 break;
2007 case MSM_HS_CLK_ON:
2008 break;
2009 case MSM_HS_CLK_PORT_OFF:
2010 break;
2011 }
Mayank Rana55046232011-03-07 10:28:42 +05302012
Mayank Rana55046232011-03-07 10:28:42 +05302013 spin_unlock_irqrestore(&uport->lock, flags);
Mayank Ranacb589d82012-03-01 11:50:03 +05302014 mutex_unlock(&msm_uport->clk_mutex);
Mayank Rana55046232011-03-07 10:28:42 +05302015}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002016EXPORT_SYMBOL(msm_hs_request_clock_on);
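/*
 * Illustrative usage (a sketch, not taken from this file): a client such as a
 * Bluetooth line discipline typically calls msm_hs_request_clock_on(uport)
 * before pushing data and msm_hs_request_clock_off(uport) once its transfer
 * has been queued, letting this driver gate the UART clocks between bursts.
 */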
Mayank Rana55046232011-03-07 10:28:42 +05302017
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002018static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
Mayank Rana55046232011-03-07 10:28:42 +05302019{
2020 unsigned int wakeup = 0;
2021 unsigned long flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002022 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
Mayank Rana55046232011-03-07 10:28:42 +05302023 struct uart_port *uport = &msm_uport->uport;
2024 struct tty_struct *tty = NULL;
2025
2026 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002027 if (msm_uport->clk_state == MSM_HS_CLK_OFF) {
2028	/* ignore the first irq - it is a pending irq that occurred
Mayank Rana55046232011-03-07 10:28:42 +05302029 * before enable_irq() */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002030 if (msm_uport->wakeup.ignore)
2031 msm_uport->wakeup.ignore = 0;
Mayank Rana55046232011-03-07 10:28:42 +05302032 else
2033 wakeup = 1;
2034 }
2035
2036 if (wakeup) {
2037 /* the uart was clocked off during an rx, wake up and
2038 * optionally inject char into tty rx */
Mayank Ranacb589d82012-03-01 11:50:03 +05302039 spin_unlock_irqrestore(&uport->lock, flags);
2040 msm_hs_request_clock_on(uport);
2041 spin_lock_irqsave(&uport->lock, flags);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002042 if (msm_uport->wakeup.inject_rx) {
Mayank Rana55046232011-03-07 10:28:42 +05302043 tty = uport->state->port.tty;
2044 tty_insert_flip_char(tty,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002045 msm_uport->wakeup.rx_to_inject,
Mayank Rana55046232011-03-07 10:28:42 +05302046 TTY_NORMAL);
Mayank Rana55046232011-03-07 10:28:42 +05302047 }
2048 }
2049
2050 spin_unlock_irqrestore(&uport->lock, flags);
2051
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002052 if (wakeup && msm_uport->wakeup.inject_rx)
2053 tty_flip_buffer_push(tty);
Mayank Rana55046232011-03-07 10:28:42 +05302054 return IRQ_HANDLED;
2055}
2056
2057static const char *msm_hs_type(struct uart_port *port)
2058{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002059 return ("MSM HS UART");
Mayank Rana55046232011-03-07 10:28:42 +05302060}
2061
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302062/**
2063 * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
2064 * @uport: uart port
2065 */
2066static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
2067{
2068 struct platform_device *pdev = to_platform_device(uport->dev);
2069 const struct msm_serial_hs_platform_data *pdata =
2070 pdev->dev.platform_data;
2071
2072 if (pdata) {
2073 if (gpio_is_valid(pdata->uart_tx_gpio))
2074 gpio_free(pdata->uart_tx_gpio);
2075 if (gpio_is_valid(pdata->uart_rx_gpio))
2076 gpio_free(pdata->uart_rx_gpio);
2077 if (gpio_is_valid(pdata->uart_cts_gpio))
2078 gpio_free(pdata->uart_cts_gpio);
2079 if (gpio_is_valid(pdata->uart_rfr_gpio))
2080 gpio_free(pdata->uart_rfr_gpio);
2081 } else {
2082 pr_err("Error:Pdata is NULL.\n");
2083 }
2084}
2085
2086/**
2087 * msm_hs_config_uart_gpios - Configures UART GPIOs
2088 * @uport: uart port
2089 */
2090static int msm_hs_config_uart_gpios(struct uart_port *uport)
2091{
2092 struct platform_device *pdev = to_platform_device(uport->dev);
2093 const struct msm_serial_hs_platform_data *pdata =
2094 pdev->dev.platform_data;
2095 int ret = 0;
2096
2097 if (pdata) {
2098 if (gpio_is_valid(pdata->uart_tx_gpio)) {
2099 ret = gpio_request(pdata->uart_tx_gpio,
2100 "UART_TX_GPIO");
2101 if (unlikely(ret)) {
2102 pr_err("gpio request failed for:%d\n",
2103 pdata->uart_tx_gpio);
2104 goto exit_uart_config;
2105 }
2106 }
2107
2108 if (gpio_is_valid(pdata->uart_rx_gpio)) {
2109 ret = gpio_request(pdata->uart_rx_gpio,
2110 "UART_RX_GPIO");
2111 if (unlikely(ret)) {
2112 pr_err("gpio request failed for:%d\n",
2113 pdata->uart_rx_gpio);
2114 goto uart_tx_unconfig;
2115 }
2116 }
2117
2118 if (gpio_is_valid(pdata->uart_cts_gpio)) {
2119 ret = gpio_request(pdata->uart_cts_gpio,
2120 "UART_CTS_GPIO");
2121 if (unlikely(ret)) {
2122 pr_err("gpio request failed for:%d\n",
2123 pdata->uart_cts_gpio);
2124 goto uart_rx_unconfig;
2125 }
2126 }
2127
2128 if (gpio_is_valid(pdata->uart_rfr_gpio)) {
2129 ret = gpio_request(pdata->uart_rfr_gpio,
2130 "UART_RFR_GPIO");
2131 if (unlikely(ret)) {
2132 pr_err("gpio request failed for:%d\n",
2133 pdata->uart_rfr_gpio);
2134 goto uart_cts_unconfig;
2135 }
2136 }
2137 } else {
2138 pr_err("Pdata is NULL.\n");
2139 ret = -EINVAL;
2140 }
2141 return ret;
2142
2143uart_cts_unconfig:
2144 if (gpio_is_valid(pdata->uart_cts_gpio))
2145 gpio_free(pdata->uart_cts_gpio);
2146uart_rx_unconfig:
2147 if (gpio_is_valid(pdata->uart_rx_gpio))
2148 gpio_free(pdata->uart_rx_gpio);
2149uart_tx_unconfig:
2150 if (gpio_is_valid(pdata->uart_tx_gpio))
2151 gpio_free(pdata->uart_tx_gpio);
2152exit_uart_config:
2153 return ret;
2154}
2155
Mayank Rana55046232011-03-07 10:28:42 +05302156/* Called when port is opened */
2157static int msm_hs_startup(struct uart_port *uport)
2158{
2159 int ret;
2160 int rfr_level;
2161 unsigned long flags;
2162 unsigned int data;
2163 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana40836782012-11-16 14:45:47 +05302164 struct platform_device *pdev = to_platform_device(uport->dev);
2165 const struct msm_serial_hs_platform_data *pdata =
2166 pdev->dev.platform_data;
Mayank Rana55046232011-03-07 10:28:42 +05302167 struct circ_buf *tx_buf = &uport->state->xmit;
2168 struct msm_hs_tx *tx = &msm_uport->tx;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302169 struct msm_hs_rx *rx = &msm_uport->rx;
2170 struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
2171 struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05302172
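	/*
	 * Compute the auto-RFR watermark so that hardware flow control
	 * deasserts ready-for-receiving roughly 16 words before the RX FIFO
	 * fills (programmed into UARTDM_MR1 below).
	 */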
2173 rfr_level = uport->fifosize;
2174 if (rfr_level > 16)
2175 rfr_level -= 16;
2176
2177 tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
2178 DMA_TO_DEVICE);
2179
Mayank Rana679436e2012-03-31 05:41:14 +05302180 wake_lock(&msm_uport->dma_wake_lock);
Mayank Rana55046232011-03-07 10:28:42 +05302181 /* turn on uart clk */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002182 ret = msm_hs_init_clk(uport);
Mayank Rana55046232011-03-07 10:28:42 +05302183 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302184 pr_err("Turning ON uartclk error\n");
2185 wake_unlock(&msm_uport->dma_wake_lock);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002186 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05302187 }
2188
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302189 if (is_blsp_uart(msm_uport)) {
2190 ret = msm_hs_config_uart_gpios(uport);
2191 if (ret) {
2192 pr_err("Uart GPIO request failed\n");
2193 goto deinit_uart_clk;
2194 }
2195 } else {
2196 if (pdata && pdata->gpio_config)
2197 if (unlikely(pdata->gpio_config(1)))
2198 dev_err(uport->dev, "Cannot configure gpios\n");
2199 }
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302200
2201 /* SPS Connect for BAM endpoints */
2202 if (is_blsp_uart(msm_uport)) {
2203 /* SPS connect for TX */
2204 ret = msm_hs_spsconnect_tx(uport);
2205 if (ret) {
2206 pr_err("msm_serial_hs: SPS connect failed for TX");
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302207 goto unconfig_uart_gpios;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302208 }
2209
2210 /* SPS connect for RX */
2211 ret = msm_hs_spsconnect_rx(uport);
2212 if (ret) {
2213 pr_err("msm_serial_hs: SPS connect failed for RX");
2214 goto sps_disconnect_tx;
2215 }
2216 }
2217
Mayank Rana55046232011-03-07 10:28:42 +05302218 /* Set auto RFR Level */
2219 data = msm_hs_read(uport, UARTDM_MR1_ADDR);
2220 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
2221 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
2222 data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
2223 data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
2224 msm_hs_write(uport, UARTDM_MR1_ADDR, data);
2225
2226 /* Make sure RXSTALE count is non-zero */
2227 data = msm_hs_read(uport, UARTDM_IPR_ADDR);
2228 if (!data) {
2229 data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
2230 msm_hs_write(uport, UARTDM_IPR_ADDR, data);
2231 }
2232
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302233 if (is_blsp_uart(msm_uport)) {
2234 /* Enable BAM mode */
2235 data = UARTDM_TX_BAM_ENABLE_BMSK | UARTDM_RX_BAM_ENABLE_BMSK;
2236 } else {
2237 /* Enable Data Mover Mode */
2238 data = UARTDM_TX_DM_EN_BMSK | UARTDM_RX_DM_EN_BMSK;
2239 }
Mayank Rana55046232011-03-07 10:28:42 +05302240 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
2241
2242 /* Reset TX */
2243 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
2244 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_RX);
2245 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_ERROR_STATUS);
2246 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_BREAK_INT);
2247 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_STALE_INT);
2248 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_CTS);
2249 msm_hs_write(uport, UARTDM_CR_ADDR, RFR_LOW);
2250 /* Turn on Uart Receiver */
2251 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_EN_BMSK);
2252
2253 /* Turn on Uart Transmitter */
2254 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_EN_BMSK);
2255
2256 /* Initialize the tx */
2257 tx->tx_ready_int_en = 0;
2258 tx->dma_in_flight = 0;
2259
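	/*
	 * Legacy data-mover path: pre-build the static parts of the TX "box"
	 * command, which moves MSM_UARTDM_BURST_SIZE-byte bursts from memory
	 * into the UARTDM TX FIFO, paced by the TX CRCI.
	 */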
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302260 if (!is_blsp_uart(msm_uport)) {
2261 tx->xfer.complete_func = msm_hs_dmov_tx_callback;
Mayank Rana55046232011-03-07 10:28:42 +05302262
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302263 tx->command_ptr->cmd = CMD_LC |
2264 CMD_DST_CRCI(msm_uport->dma_tx_crci) | CMD_MODE_BOX;
Mayank Rana55046232011-03-07 10:28:42 +05302265
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302266 tx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
Mayank Rana55046232011-03-07 10:28:42 +05302267 | (MSM_UARTDM_BURST_SIZE);
2268
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302269 tx->command_ptr->row_offset = (MSM_UARTDM_BURST_SIZE << 16);
Mayank Rana55046232011-03-07 10:28:42 +05302270
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302271 tx->command_ptr->dst_row_addr =
2272 msm_uport->uport.mapbase + UARTDM_TF_ADDR;
Mayank Rana05396b22013-03-16 19:10:11 +05302273
2274 msm_uport->imr_reg |= UARTDM_ISR_RXSTALE_BMSK;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302275 }
Mayank Rana55046232011-03-07 10:28:42 +05302276
Mayank Rana55046232011-03-07 10:28:42 +05302277 /* Enable reading the current CTS, no harm even if CTS is ignored */
2278 msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
2279
2280 msm_hs_write(uport, UARTDM_TFWR_ADDR, 0); /* TXLEV on empty TX fifo */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002281 /*
2282 * Complete all device write related configuration before
2283	 * queuing the RX request. Hence mb() is required here.
2284 */
2285 mb();
Mayank Rana55046232011-03-07 10:28:42 +05302286
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002287 if (use_low_power_wakeup(msm_uport)) {
2288 ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
Mayank Rana679436e2012-03-31 05:41:14 +05302289 if (unlikely(ret)) {
2290 pr_err("%s():Err setting wakeup irq\n", __func__);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302291 goto sps_disconnect_rx;
Mayank Rana679436e2012-03-31 05:41:14 +05302292 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002293 }
Mayank Rana55046232011-03-07 10:28:42 +05302294
2295 ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
2296 "msm_hs_uart", msm_uport);
2297 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302298 pr_err("%s():Error getting uart irq\n", __func__);
2299 goto free_wake_irq;
Mayank Rana55046232011-03-07 10:28:42 +05302300 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002301 if (use_low_power_wakeup(msm_uport)) {
Mayank Ranacb589d82012-03-01 11:50:03 +05302302
2303 ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
2304 msm_hs_wakeup_isr,
2305 IRQF_TRIGGER_FALLING,
2306 "msm_hs_wakeup", msm_uport);
2307
Mayank Rana55046232011-03-07 10:28:42 +05302308 if (unlikely(ret)) {
Mayank Rana679436e2012-03-31 05:41:14 +05302309 pr_err("%s():Err getting uart wakeup_irq\n", __func__);
2310 goto free_uart_irq;
Mayank Rana55046232011-03-07 10:28:42 +05302311 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002312 disable_irq(msm_uport->wakeup.irq);
Mayank Rana55046232011-03-07 10:28:42 +05302313 }
2314
Mayank Rana88d49142013-01-16 17:28:53 +05302315 /* Vote for PNOC BUS Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302316 msm_hs_bus_voting(msm_uport, BUS_SCALING);
Mayank Rana88d49142013-01-16 17:28:53 +05302317
Mayank Rana55046232011-03-07 10:28:42 +05302318 spin_lock_irqsave(&uport->lock, flags);
2319
Mayank Rana55046232011-03-07 10:28:42 +05302320 msm_hs_start_rx_locked(uport);
2321
2322 spin_unlock_irqrestore(&uport->lock, flags);
2323 ret = pm_runtime_set_active(uport->dev);
2324 if (ret)
2325 dev_err(uport->dev, "set active error:%d\n", ret);
2326 pm_runtime_enable(uport->dev);
2327
2328 return 0;
2329
Mayank Rana679436e2012-03-31 05:41:14 +05302330free_uart_irq:
2331 free_irq(uport->irq, msm_uport);
2332free_wake_irq:
2333 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302334sps_disconnect_rx:
2335 if (is_blsp_uart(msm_uport))
2336 sps_disconnect(sps_pipe_handle_rx);
2337sps_disconnect_tx:
2338 if (is_blsp_uart(msm_uport))
2339 sps_disconnect(sps_pipe_handle_tx);
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05302340unconfig_uart_gpios:
2341 if (is_blsp_uart(msm_uport))
2342 msm_hs_unconfig_uart_gpios(uport);
Mayank Rana679436e2012-03-31 05:41:14 +05302343deinit_uart_clk:
Mayank Ranacb589d82012-03-01 11:50:03 +05302344 clk_disable_unprepare(msm_uport->clk);
Mayank Rana679436e2012-03-31 05:41:14 +05302345 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05302346 clk_disable_unprepare(msm_uport->pclk);
Mayank Rana679436e2012-03-31 05:41:14 +05302347 wake_unlock(&msm_uport->dma_wake_lock);
2348
Mayank Rana55046232011-03-07 10:28:42 +05302349 return ret;
2350}
2351
2352/* Initialize tx and rx data structures */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002353static int uartdm_init_port(struct uart_port *uport)
Mayank Rana55046232011-03-07 10:28:42 +05302354{
2355 int ret = 0;
2356 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2357 struct msm_hs_tx *tx = &msm_uport->tx;
2358 struct msm_hs_rx *rx = &msm_uport->rx;
2359
Mayank Rana55046232011-03-07 10:28:42 +05302360 init_waitqueue_head(&rx->wait);
Mayank Ranaaf2f0082012-05-22 10:16:02 +05302361 init_waitqueue_head(&tx->wait);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002362 wake_lock_init(&rx->wake_lock, WAKE_LOCK_SUSPEND, "msm_serial_hs_rx");
2363 wake_lock_init(&msm_uport->dma_wake_lock, WAKE_LOCK_SUSPEND,
2364 "msm_serial_hs_dma");
2365
2366 tasklet_init(&rx->tlet, msm_serial_hs_rx_tlet,
2367 (unsigned long) &rx->tlet);
2368 tasklet_init(&tx->tlet, msm_serial_hs_tx_tlet,
2369 (unsigned long) &tx->tlet);
Mayank Rana55046232011-03-07 10:28:42 +05302370
2371 rx->pool = dma_pool_create("rx_buffer_pool", uport->dev,
2372 UARTDM_RX_BUF_SIZE, 16, 0);
2373 if (!rx->pool) {
2374 pr_err("%s(): cannot allocate rx_buffer_pool", __func__);
2375 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002376 goto exit_tasket_init;
Mayank Rana55046232011-03-07 10:28:42 +05302377 }
2378
2379 rx->buffer = dma_pool_alloc(rx->pool, GFP_KERNEL, &rx->rbuffer);
2380 if (!rx->buffer) {
2381 pr_err("%s(): cannot allocate rx->buffer", __func__);
2382 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002383 goto free_pool;
Mayank Rana55046232011-03-07 10:28:42 +05302384 }
2385
Mayank Ranaff398d02012-12-18 10:22:50 +05302386 /* Set up Uart Receive */
Mayank Rana05396b22013-03-16 19:10:11 +05302387 if (is_blsp_uart(msm_uport))
2388 msm_hs_write(uport, UARTDM_RFWR_ADDR, 32);
2389 else
2390 msm_hs_write(uport, UARTDM_RFWR_ADDR, 0);
Mayank Ranaff398d02012-12-18 10:22:50 +05302391
2392 INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
2393
2394 if (is_blsp_uart(msm_uport))
2395 return ret;
2396
2397 /* Allocate the command pointer. Needs to be 64 bit aligned */
2398 tx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
2399 if (!tx->command_ptr) {
2400		ret = -ENOMEM;
2401		goto free_rx_buffer;
2402 }
2403
2404 tx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
2405 if (!tx->command_ptr_ptr) {
2406 ret = -ENOMEM;
2407 goto free_tx_command_ptr;
2408 }
2409
2410 tx->mapped_cmd_ptr = dma_map_single(uport->dev, tx->command_ptr,
2411 sizeof(dmov_box), DMA_TO_DEVICE);
2412 tx->mapped_cmd_ptr_ptr = dma_map_single(uport->dev,
2413 tx->command_ptr_ptr,
2414 sizeof(u32), DMA_TO_DEVICE);
2415 tx->xfer.cmdptr = DMOV_CMD_ADDR(tx->mapped_cmd_ptr_ptr);
2416
Mayank Rana55046232011-03-07 10:28:42 +05302417 /* Allocate the command pointer. Needs to be 64 bit aligned */
2418 rx->command_ptr = kmalloc(sizeof(dmov_box), GFP_KERNEL | __GFP_DMA);
2419 if (!rx->command_ptr) {
2420 pr_err("%s(): cannot allocate rx->command_ptr", __func__);
2421 ret = -ENOMEM;
Mayank Ranaff398d02012-12-18 10:22:50 +05302422 goto free_tx_command_ptr_ptr;
Mayank Rana55046232011-03-07 10:28:42 +05302423 }
2424
Mayank Rana8431de82011-12-08 09:06:08 +05302425 rx->command_ptr_ptr = kmalloc(sizeof(u32), GFP_KERNEL | __GFP_DMA);
Mayank Rana55046232011-03-07 10:28:42 +05302426 if (!rx->command_ptr_ptr) {
2427 pr_err("%s(): cannot allocate rx->command_ptr_ptr", __func__);
2428 ret = -ENOMEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002429 goto free_rx_command_ptr;
Mayank Rana55046232011-03-07 10:28:42 +05302430 }
2431
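	/*
	 * Build the data-mover "box" command for RX: move the RX FIFO
	 * contents into the DMA-pool buffer in MSM_UARTDM_BURST_SIZE-byte
	 * bursts under RX CRCI flow control, then map the command so the
	 * data mover hardware can fetch it.
	 */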
2432 rx->command_ptr->num_rows = ((UARTDM_RX_BUF_SIZE >> 4) << 16) |
2433 (UARTDM_RX_BUF_SIZE >> 4);
2434
2435 rx->command_ptr->dst_row_addr = rx->rbuffer;
2436
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002437 rx->xfer.complete_func = msm_hs_dmov_rx_callback;
2438
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002439 rx->command_ptr->cmd = CMD_LC |
2440 CMD_SRC_CRCI(msm_uport->dma_rx_crci) | CMD_MODE_BOX;
2441
2442 rx->command_ptr->src_dst_len = (MSM_UARTDM_BURST_SIZE << 16)
2443 | (MSM_UARTDM_BURST_SIZE);
2444 rx->command_ptr->row_offset = MSM_UARTDM_BURST_SIZE;
2445 rx->command_ptr->src_row_addr = uport->mapbase + UARTDM_RF_ADDR;
2446
Mayank Rana55046232011-03-07 10:28:42 +05302447 rx->mapped_cmd_ptr = dma_map_single(uport->dev, rx->command_ptr,
2448 sizeof(dmov_box), DMA_TO_DEVICE);
2449
2450 *rx->command_ptr_ptr = CMD_PTR_LP | DMOV_CMD_ADDR(rx->mapped_cmd_ptr);
2451
2452 rx->cmdptr_dmaaddr = dma_map_single(uport->dev, rx->command_ptr_ptr,
Mayank Rana8431de82011-12-08 09:06:08 +05302453 sizeof(u32), DMA_TO_DEVICE);
Mayank Rana55046232011-03-07 10:28:42 +05302454 rx->xfer.cmdptr = DMOV_CMD_ADDR(rx->cmdptr_dmaaddr);
2455
Mayank Rana55046232011-03-07 10:28:42 +05302456 return ret;
2457
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002458free_rx_command_ptr:
Mayank Rana55046232011-03-07 10:28:42 +05302459 kfree(rx->command_ptr);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002460
Mayank Ranaff398d02012-12-18 10:22:50 +05302461free_tx_command_ptr_ptr:
2462 kfree(msm_uport->tx.command_ptr_ptr);
2463 dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr_ptr,
2464 sizeof(u32), DMA_TO_DEVICE);
2465 dma_unmap_single(uport->dev, msm_uport->tx.mapped_cmd_ptr,
2466 sizeof(dmov_box), DMA_TO_DEVICE);
2467
2468free_tx_command_ptr:
2469 kfree(msm_uport->tx.command_ptr);
2470
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002471free_rx_buffer:
Mayank Rana55046232011-03-07 10:28:42 +05302472 dma_pool_free(msm_uport->rx.pool, msm_uport->rx.buffer,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002473 msm_uport->rx.rbuffer);
2474
2475free_pool:
Mayank Rana55046232011-03-07 10:28:42 +05302476 dma_pool_destroy(msm_uport->rx.pool);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002477
2478exit_tasket_init:
2479 wake_lock_destroy(&msm_uport->rx.wake_lock);
2480 wake_lock_destroy(&msm_uport->dma_wake_lock);
2481 tasklet_kill(&msm_uport->tx.tlet);
2482 tasklet_kill(&msm_uport->rx.tlet);
Mayank Rana55046232011-03-07 10:28:42 +05302483 return ret;
2484}
2485
Mayank Ranaff398d02012-12-18 10:22:50 +05302486struct msm_serial_hs_platform_data
2487 *msm_hs_dt_to_pdata(struct platform_device *pdev)
2488{
2489 struct device_node *node = pdev->dev.of_node;
2490 struct msm_serial_hs_platform_data *pdata;
2491 int rx_to_inject, ret;
2492
2493 pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
2494 if (!pdata) {
2495 pr_err("unable to allocate memory for platform data\n");
2496 return ERR_PTR(-ENOMEM);
2497 }
2498
2499 /* UART TX GPIO */
2500 pdata->uart_tx_gpio = of_get_named_gpio(node,
2501 "qcom,tx-gpio", 0);
2502 if (pdata->uart_tx_gpio < 0)
2503 pr_debug("uart_tx_gpio is not available\n");
2504
2505 /* UART RX GPIO */
2506 pdata->uart_rx_gpio = of_get_named_gpio(node,
2507 "qcom,rx-gpio", 0);
2508 if (pdata->uart_rx_gpio < 0)
2509 pr_debug("uart_rx_gpio is not available\n");
2510
2511 /* UART CTS GPIO */
2512 pdata->uart_cts_gpio = of_get_named_gpio(node,
2513 "qcom,cts-gpio", 0);
2514 if (pdata->uart_cts_gpio < 0)
2515 pr_debug("uart_cts_gpio is not available\n");
2516
2517 /* UART RFR GPIO */
2518 pdata->uart_rfr_gpio = of_get_named_gpio(node,
2519 "qcom,rfr-gpio", 0);
2520 if (pdata->uart_rfr_gpio < 0)
2521 pr_debug("uart_rfr_gpio is not available\n");
2522
2523 pdata->inject_rx_on_wakeup = of_property_read_bool(node,
2524 "qcom,inject-rx-on-wakeup");
2525
2526 if (pdata->inject_rx_on_wakeup) {
2527 ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
2528 &rx_to_inject);
2529 if (ret < 0) {
2530 pr_err("Error: Rx_char_to_inject not specified.\n");
2531 return ERR_PTR(ret);
2532 }
2533 pdata->rx_to_inject = (char)rx_to_inject;
2534 }
2535
2536 ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
2537 &pdata->bam_tx_ep_pipe_index);
2538 if (ret < 0) {
2539 pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
2540 return ERR_PTR(ret);
2541 }
2542
2543 if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
2544 pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
2545 pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
2546 return ERR_PTR(-EINVAL);
2547 }
2548
2549 ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
2550 &pdata->bam_rx_ep_pipe_index);
2551 if (ret < 0) {
2552 pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
2553 return ERR_PTR(ret);
2554 }
2555
2556 if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
2557 pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
2558 pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
2559 return ERR_PTR(-EINVAL);
2560 }
2561
2562 pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
2563		"tx_gpio:%d rx_gpio:%d cts_gpio:%d rfr_gpio:%d",
2564 pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
2565 pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
2566 pdata->uart_rfr_gpio);
2567
2568 return pdata;
2569}
2570
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302571
2572/**
2573 * Deallocate UART peripheral's SPS endpoint
2574 * @msm_uport - Pointer to msm_hs_port structure
2575 * @ep - Pointer to sps endpoint data structure
2576 */
2577
2578static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
2579 struct msm_hs_sps_ep_conn_data *ep)
2580{
2581 struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
2582 struct sps_connect *sps_config = &ep->config;
2583
2584 dma_free_coherent(msm_uport->uport.dev,
2585 sps_config->desc.size,
2586			sps_config->desc.base,
2587			sps_config->desc.phys_base);
2588 sps_free_endpoint(sps_pipe_handle);
2589}
2590
2591
2592/**
2593 * Allocate UART peripheral's SPS endpoint
2594 *
2595 * This function allocates endpoint context
2596 * by calling appropriate SPS driver APIs.
2597 *
2598 * @msm_uport - Pointer to msm_hs_port structure
2599 * @ep - Pointer to sps endpoint data structure
2600 * @is_producer - 1 means Producer endpoint
2601 * - 0 means Consumer endpoint
2602 *
2603 * @return - 0 if successful else negative value
2604 */
2605
2606static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
2607 struct msm_hs_sps_ep_conn_data *ep,
2608 bool is_producer)
2609{
2610 int rc = 0;
2611 struct sps_pipe *sps_pipe_handle;
2612 struct sps_connect *sps_config = &ep->config;
2613 struct sps_register_event *sps_event = &ep->event;
2614
2615 /* Allocate endpoint context */
2616 sps_pipe_handle = sps_alloc_endpoint();
2617 if (!sps_pipe_handle) {
2618 pr_err("msm_serial_hs: sps_alloc_endpoint() failed!!\n"
2619 "is_producer=%d", is_producer);
2620 rc = -ENOMEM;
2621 goto out;
2622 }
2623
2624 /* Get default connection configuration for an endpoint */
2625 rc = sps_get_config(sps_pipe_handle, sps_config);
2626 if (rc) {
2627 pr_err("msm_serial_hs: sps_get_config() failed!!\n"
2628 "pipe_handle=0x%x rc=%d", (u32)sps_pipe_handle, rc);
2629 goto get_config_err;
2630 }
2631
2632 /* Modify the default connection configuration */
2633 if (is_producer) {
2634 /* For UART producer transfer, source is UART peripheral
2635	   whereas destination is system memory */
2636 sps_config->source = msm_uport->bam_handle;
2637 sps_config->destination = SPS_DEV_HANDLE_MEM;
2638 sps_config->mode = SPS_MODE_SRC;
2639 sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
2640 sps_config->dest_pipe_index = 0;
2641 sps_config->options = SPS_O_EOT;
2642 } else {
2643 /* For UART consumer transfer, source is system memory
2644	   whereas destination is UART peripheral */
2645 sps_config->source = SPS_DEV_HANDLE_MEM;
2646 sps_config->destination = msm_uport->bam_handle;
2647 sps_config->mode = SPS_MODE_DEST;
2648 sps_config->src_pipe_index = 0;
2649 sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
2650 sps_config->options = SPS_O_EOT;
2651 }
2652
2653 sps_config->event_thresh = 0x10;
2654
2655 /* Allocate maximum descriptor fifo size */
2656 sps_config->desc.size = 65532;
2657 sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
2658 sps_config->desc.size,
2659 &sps_config->desc.phys_base,
2660 GFP_KERNEL);
2661 if (!sps_config->desc.base) {
2662 rc = -ENOMEM;
2663 pr_err("msm_serial_hs: dma_alloc_coherent() failed!!\n");
2664 goto get_config_err;
2665 }
2666 memset(sps_config->desc.base, 0x00, sps_config->desc.size);
2667
2668 sps_event->mode = SPS_TRIGGER_CALLBACK;
2669 sps_event->options = SPS_O_EOT;
2670 if (is_producer)
2671 sps_event->callback = msm_hs_sps_rx_callback;
2672 else
2673 sps_event->callback = msm_hs_sps_tx_callback;
2674
2675 sps_event->user = (void *)msm_uport;
2676
2677 /* Now save the sps pipe handle */
2678 ep->pipe_handle = sps_pipe_handle;
2679 pr_debug("msm_serial_hs: success !! %s: pipe_handle=0x%x\n"
2680 "desc_fifo.phys_base=0x%x\n",
2681 is_producer ? "READ" : "WRITE",
2682 (u32)sps_pipe_handle, sps_config->desc.phys_base);
2683 return 0;
2684
2685get_config_err:
2686 sps_free_endpoint(sps_pipe_handle);
2687out:
2688 return rc;
2689}
2690
2691/**
2692 * Initialize SPS HW connected with UART core
2693 *
2694 * This function registers BAM HW resources with the
2695 * SPS driver and then initializes 2 SPS endpoints
2696 *
2697 * msm_uport - Pointer to msm_hs_port structure
2698 *
2699 * @return - 0 if successful else negative value
2700 */
2701
2702static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
2703{
2704 int rc = 0;
2705 struct sps_bam_props bam = {0};
2706 u32 bam_handle;
2707
2708 rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
2709 if (rc || !bam_handle) {
2710 bam.phys_addr = msm_uport->bam_mem;
2711 bam.virt_addr = msm_uport->bam_base;
2712 /*
2713		 * This event threshold value is only significant for BAM-to-BAM
2714 * transfer. It's ignored for BAM-to-System mode transfer.
2715 */
2716 bam.event_threshold = 0x10; /* Pipe event threshold */
2717 bam.summing_threshold = 1; /* BAM event threshold */
2718
2719		/* SPS driver will handle the UART BAM IRQ */
2720 bam.irq = (u32)msm_uport->bam_irq;
2721 bam.manage = SPS_BAM_MGR_LOCAL;
2722
2723 pr_debug("msm_serial_hs: bam physical base=0x%x\n",
2724 (u32)bam.phys_addr);
2725 pr_debug("msm_serial_hs: bam virtual base=0x%x\n",
2726 (u32)bam.virt_addr);
2727
2728 /* Register UART Peripheral BAM device to SPS driver */
2729 rc = sps_register_bam_device(&bam, &bam_handle);
2730 if (rc) {
2731 pr_err("msm_serial_hs: BAM device register failed\n");
2732 return rc;
2733 }
2734	pr_info("msm_serial_hs: BAM device registered. bam_handle=0x%x\n",
2735	bam_handle);
2736 }
2737 msm_uport->bam_handle = bam_handle;
2738
2739 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
2740 UART_SPS_PROD_PERIPHERAL);
2741 if (rc) {
2742	pr_err("%s: Failed to Init Producer BAM-pipe\n", __func__);
2743 goto deregister_bam;
2744 }
2745
2746 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
2747 UART_SPS_CONS_PERIPHERAL);
2748 if (rc) {
2749	pr_err("%s: Failed to Init Consumer BAM-pipe\n", __func__);
2750 goto deinit_ep_conn_prod;
2751 }
2752 return 0;
2753
2754deinit_ep_conn_prod:
2755 msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
2756deregister_bam:
2757 sps_deregister_bam_device(msm_uport->bam_handle);
2758 return rc;
2759}
2760
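/*
 * Track which BLSP UART ids are already claimed so that a device-tree
 * alias cannot collide with an auto-assigned platform device id.
 */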
Saket Saurabh10e88b32013-02-04 15:26:34 +05302761#define BLSP_UART_NR 12
2762static int deviceid[BLSP_UART_NR] = {0};
2763static atomic_t msm_serial_hs_next_id = ATOMIC_INIT(0);
2764
Mayank Rana55046232011-03-07 10:28:42 +05302765static int __devinit msm_hs_probe(struct platform_device *pdev)
2766{
Saket Saurabh10e88b32013-02-04 15:26:34 +05302767 int ret = 0, alias_num = -1;
Mayank Rana55046232011-03-07 10:28:42 +05302768 struct uart_port *uport;
2769 struct msm_hs_port *msm_uport;
Mayank Ranaff398d02012-12-18 10:22:50 +05302770 struct resource *core_resource;
2771 struct resource *bam_resource;
Mayank Rana55046232011-03-07 10:28:42 +05302772 struct resource *resource;
Mayank Ranaff398d02012-12-18 10:22:50 +05302773 int core_irqres, bam_irqres;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002774 struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
Mayank Ranaff398d02012-12-18 10:22:50 +05302775
2776 if (pdev->dev.of_node) {
2777 dev_dbg(&pdev->dev, "device tree enabled\n");
2778 pdata = msm_hs_dt_to_pdata(pdev);
2779 if (IS_ERR(pdata))
2780 return PTR_ERR(pdata);
2781
Saket Saurabh10e88b32013-02-04 15:26:34 +05302782 if (pdev->id == -1) {
2783 pdev->id = atomic_inc_return(&msm_serial_hs_next_id)-1;
2784 deviceid[pdev->id] = 1;
2785 }
2786
2787 /* Use alias from device tree if present
2788 * Alias is used as an optional property
2789 */
2790 alias_num = of_alias_get_id(pdev->dev.of_node, "uart");
2791 if (alias_num >= 0) {
2792		/* If alias_num is between 0 and 11, check that it is not
2793		 * equal to a previously assigned pdev id. If it is
2794		 * equal to a previous pdev id, fail the device probe.
2795 */
2796 if (alias_num < BLSP_UART_NR) {
2797 if (deviceid[alias_num] == 0) {
2798 pdev->id = alias_num;
2799 } else {
2800 pr_err("alias_num=%d already used\n",
2801 alias_num);
2802 return -EINVAL;
2803 }
2804 } else {
2805 pdev->id = alias_num;
2806 }
2807 }
Mayank Ranaff398d02012-12-18 10:22:50 +05302808
2809 pdev->dev.platform_data = pdata;
2810 }
Mayank Rana55046232011-03-07 10:28:42 +05302811
2812 if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
Mayank Ranaff398d02012-12-18 10:22:50 +05302813	pr_err("Invalid platform device ID = %d\n", pdev->id);
Mayank Rana55046232011-03-07 10:28:42 +05302814 return -EINVAL;
2815 }
2816
2817 msm_uport = &q_uart_port[pdev->id];
2818 uport = &msm_uport->uport;
Mayank Rana55046232011-03-07 10:28:42 +05302819 uport->dev = &pdev->dev;
2820
Mayank Ranaff398d02012-12-18 10:22:50 +05302821 if (pdev->dev.of_node)
2822 msm_uport->uart_type = BLSP_HSUART;
Mayank Rana55046232011-03-07 10:28:42 +05302823
Mayank Ranaff398d02012-12-18 10:22:50 +05302824 /* Get required resources for BAM HSUART */
2825 if (is_blsp_uart(msm_uport)) {
2826 core_resource = platform_get_resource_byname(pdev,
2827 IORESOURCE_MEM, "core_mem");
2828 bam_resource = platform_get_resource_byname(pdev,
2829 IORESOURCE_MEM, "bam_mem");
2830 core_irqres = platform_get_irq_byname(pdev, "core_irq");
2831 bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002832
Mayank Ranaff398d02012-12-18 10:22:50 +05302833 if (!core_resource) {
2834 pr_err("Invalid core HSUART Resources.\n");
2835 return -ENXIO;
2836 }
2837
2838 if (!bam_resource) {
2839 pr_err("Invalid BAM HSUART Resources.\n");
2840 return -ENXIO;
2841 }
2842
2843 if (!core_irqres) {
2844 pr_err("Invalid core irqres Resources.\n");
2845 return -ENXIO;
2846 }
2847 if (!bam_irqres) {
2848 pr_err("Invalid bam irqres Resources.\n");
2849 return -ENXIO;
2850 }
2851
2852 uport->mapbase = core_resource->start;
2853
2854 uport->membase = ioremap(uport->mapbase,
2855 resource_size(core_resource));
2856 if (unlikely(!uport->membase)) {
2857 pr_err("UART Resource ioremap Failed.\n");
2858 return -ENOMEM;
2859 }
2860 msm_uport->bam_mem = bam_resource->start;
2861 msm_uport->bam_base = ioremap(msm_uport->bam_mem,
2862 resource_size(bam_resource));
2863 if (unlikely(!msm_uport->bam_base)) {
2864 pr_err("UART BAM Resource ioremap Failed.\n");
2865 iounmap(uport->membase);
2866 return -ENOMEM;
2867 }
2868
2869 uport->irq = core_irqres;
2870 msm_uport->bam_irq = bam_irqres;
2871
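		/*
		 * Register a bus-scaling client so the driver can vote for
		 * bus bandwidth while the port is in use.
		 */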
Mayank Rana88d49142013-01-16 17:28:53 +05302872 msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
2873 if (!msm_uport->bus_scale_table) {
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302874 pr_err("BLSP UART: Bus scaling is disabled.\n");
Mayank Rana88d49142013-01-16 17:28:53 +05302875 } else {
2876 msm_uport->bus_perf_client =
2877 msm_bus_scale_register_client
2878 (msm_uport->bus_scale_table);
2879			if (!msm_uport->bus_perf_client) {
2880 pr_err("%s(): Bus client register failed.\n",
2881 __func__);
Mayank Ranae4bc7de2013-01-22 12:51:16 +05302882 ret = -EINVAL;
Mayank Rana88d49142013-01-16 17:28:53 +05302883 goto unmap_memory;
2884 }
2885 }
Mayank Ranaff398d02012-12-18 10:22:50 +05302886 } else {
2887
2888 resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2889 if (unlikely(!resource))
2890 return -ENXIO;
2891 uport->mapbase = resource->start;
2892 uport->membase = ioremap(uport->mapbase,
2893 resource_size(resource));
2894 if (unlikely(!uport->membase))
2895 return -ENOMEM;
2896
2897 uport->irq = platform_get_irq(pdev, 0);
2898 if (unlikely((int)uport->irq < 0)) {
2899 pr_err("UART IRQ Failed.\n");
2900 iounmap(uport->membase);
2901 return -ENXIO;
2902 }
2903 }
Mayank Rana55046232011-03-07 10:28:42 +05302904
Mayank Rana55046232011-03-07 10:28:42 +05302905 if (pdata == NULL)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002906 msm_uport->wakeup.irq = -1;
2907 else {
2908 msm_uport->wakeup.irq = pdata->wakeup_irq;
2909 msm_uport->wakeup.ignore = 1;
2910 msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
2911 msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
2912
Mayank Ranaff398d02012-12-18 10:22:50 +05302913 if (unlikely(msm_uport->wakeup.irq < 0)) {
2914 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302915 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302916 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002917
Mayank Ranaff398d02012-12-18 10:22:50 +05302918 if (is_blsp_uart(msm_uport)) {
2919 msm_uport->bam_tx_ep_pipe_index =
2920 pdata->bam_tx_ep_pipe_index;
2921 msm_uport->bam_rx_ep_pipe_index =
2922 pdata->bam_rx_ep_pipe_index;
2923 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002924 }
Mayank Rana55046232011-03-07 10:28:42 +05302925
Mayank Ranaff398d02012-12-18 10:22:50 +05302926 if (!is_blsp_uart(msm_uport)) {
Mayank Rana55046232011-03-07 10:28:42 +05302927
Mayank Ranaff398d02012-12-18 10:22:50 +05302928 resource = platform_get_resource_byname(pdev,
2929 IORESOURCE_DMA, "uartdm_channels");
2930 if (unlikely(!resource)) {
2931 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302932 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302933 }
2934
2935 msm_uport->dma_tx_channel = resource->start;
2936 msm_uport->dma_rx_channel = resource->end;
2937
2938 resource = platform_get_resource_byname(pdev,
2939 IORESOURCE_DMA, "uartdm_crci");
2940 if (unlikely(!resource)) {
2941 ret = -ENXIO;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302942 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302943 }
2944
2945 msm_uport->dma_tx_crci = resource->start;
2946 msm_uport->dma_rx_crci = resource->end;
2947 }
Mayank Rana55046232011-03-07 10:28:42 +05302948
2949 uport->iotype = UPIO_MEM;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002950 uport->fifosize = 64;
Mayank Rana55046232011-03-07 10:28:42 +05302951 uport->ops = &msm_hs_ops;
2952 uport->flags = UPF_BOOT_AUTOCONF;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002953 uport->uartclk = 7372800;
Mayank Rana55046232011-03-07 10:28:42 +05302954 msm_uport->imr_reg = 0x0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002955
Matt Wagantalle2522372011-08-17 14:52:21 -07002956 msm_uport->clk = clk_get(&pdev->dev, "core_clk");
Mayank Ranaff398d02012-12-18 10:22:50 +05302957 if (IS_ERR(msm_uport->clk)) {
2958 ret = PTR_ERR(msm_uport->clk);
Mayank Rana43c8baa2013-02-23 14:57:14 +05302959 goto deregister_bus_client;
Mayank Ranaff398d02012-12-18 10:22:50 +05302960 }
Mayank Rana55046232011-03-07 10:28:42 +05302961
Matt Wagantalle2522372011-08-17 14:52:21 -07002962 msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002963 /*
2964 * Some configurations do not require explicit pclk control so
2965 * do not flag error on pclk get failure.
2966 */
2967 if (IS_ERR(msm_uport->pclk))
2968 msm_uport->pclk = NULL;
2969
2970 ret = clk_set_rate(msm_uport->clk, uport->uartclk);
2971 if (ret) {
2972 printk(KERN_WARNING "Error setting clock rate on UART\n");
Mayank Rana43c8baa2013-02-23 14:57:14 +05302973 goto put_clk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002974 }
2975
Mayank Ranacb589d82012-03-01 11:50:03 +05302976 msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
2977 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
2978 if (!msm_uport->hsuart_wq) {
2979 pr_err("%s(): Unable to create workqueue hsuart_wq\n",
2980 __func__);
Mayank Ranaff398d02012-12-18 10:22:50 +05302981 ret = -ENOMEM;
Mayank Rana43c8baa2013-02-23 14:57:14 +05302982 goto put_clk;
Mayank Ranacb589d82012-03-01 11:50:03 +05302983 }
2984
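	/* Clock-off requests are deferred to the dedicated workqueue */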
2985 INIT_WORK(&msm_uport->clock_off_w, hsuart_clock_off_work);
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302986
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302987 /* Init work for sps_disconnect in stop_rx_locked */
2988 INIT_WORK(&msm_uport->disconnect_rx_endpoint,
2989 hsuart_disconnect_rx_endpoint_work);
Mayank Ranacb589d82012-03-01 11:50:03 +05302990 mutex_init(&msm_uport->clk_mutex);
2991
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302992 /* Initialize SPS HW connected with UART core */
2993 if (is_blsp_uart(msm_uport)) {
2994 ret = msm_hs_sps_init(msm_uport);
2995 if (unlikely(ret)) {
2996			pr_err("SPS initialization failed! err=%d\n", ret);
Mayank Rana43c8baa2013-02-23 14:57:14 +05302997 goto destroy_mutex;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05302998 }
2999 }
3000
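	/* Vote for bus bandwidth before enabling clocks and touching registers */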
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303001 msm_hs_bus_voting(msm_uport, BUS_SCALING);
3002
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003003 clk_prepare_enable(msm_uport->clk);
3004 if (msm_uport->pclk)
3005 clk_prepare_enable(msm_uport->pclk);
3006
Mayank Rana55046232011-03-07 10:28:42 +05303007 ret = uartdm_init_port(uport);
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003008 if (unlikely(ret)) {
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303009 goto err_clock;
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003010 }
Mayank Rana55046232011-03-07 10:28:42 +05303011
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003012 /* configure the CR Protection to Enable */
3013 msm_hs_write(uport, UARTDM_CR_ADDR, CR_PROTECTION_EN);
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003014
Matt Wagantall7f32d2a2012-05-17 15:48:04 -07003015
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003016 /*
3017	 * Enable Command register protection before going ahead, as this hw
3018	 * configuration makes sure that an issued cmd to the CR register completes
3019	 * before the next issued cmd starts. Hence mb() is required here.
3020 */
3021 mb();
Mayank Rana55046232011-03-07 10:28:42 +05303022
3023 msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
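	/* hrtimer that retries the clock-off sequence; see msm_hs_clk_off_retry() */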
3024 hrtimer_init(&msm_uport->clk_off_timer, CLOCK_MONOTONIC,
3025 HRTIMER_MODE_REL);
3026 msm_uport->clk_off_timer.function = msm_hs_clk_off_retry;
3027 msm_uport->clk_off_delay = ktime_set(0, 1000000); /* 1ms */
3028
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003029 ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
3030 if (unlikely(ret))
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303031 goto err_clock;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003032
3033 msm_serial_debugfs_init(msm_uport, pdev->id);
3034
Mayank Rana55046232011-03-07 10:28:42 +05303035 uport->line = pdev->id;
Saket Saurabh51690e52012-08-17 14:17:46 +05303036 if (pdata != NULL && pdata->userid && pdata->userid <= UARTDM_NR)
3037 uport->line = pdata->userid;
Mayank Ranaff398d02012-12-18 10:22:50 +05303038 ret = uart_add_one_port(&msm_hs_driver, uport);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303039 if (!ret) {
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303040 msm_hs_bus_voting(msm_uport, BUS_RESET);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303041 clk_disable_unprepare(msm_uport->clk);
3042 if (msm_uport->pclk)
3043 clk_disable_unprepare(msm_uport->pclk);
Mayank Ranaff398d02012-12-18 10:22:50 +05303044 return ret;
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303045 }
Mayank Ranaff398d02012-12-18 10:22:50 +05303046
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303047err_clock:
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303048
3049 msm_hs_bus_voting(msm_uport, BUS_RESET);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303050 clk_disable_unprepare(msm_uport->clk);
3051 if (msm_uport->pclk)
3052 clk_disable_unprepare(msm_uport->pclk);
Mayank Rana43c8baa2013-02-23 14:57:14 +05303053
3054destroy_mutex:
3055 mutex_destroy(&msm_uport->clk_mutex);
Saket Saurabh2c3f0b92013-01-16 15:06:39 +05303056 destroy_workqueue(msm_uport->hsuart_wq);
Mayank Rana43c8baa2013-02-23 14:57:14 +05303057
3058put_clk:
3059 if (msm_uport->pclk)
3060 clk_put(msm_uport->pclk);
3061
3062 if (msm_uport->clk)
3063 clk_put(msm_uport->clk);
3064
3065deregister_bus_client:
3066 if (is_blsp_uart(msm_uport))
3067 msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
Mayank Ranaff398d02012-12-18 10:22:50 +05303068unmap_memory:
3069 iounmap(uport->membase);
3070 if (is_blsp_uart(msm_uport))
3071 iounmap(msm_uport->bam_base);
3072
3073 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303074}
3075
3076static int __init msm_serial_hs_init(void)
3077{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003078 int ret;
3079 int i;
Mayank Rana55046232011-03-07 10:28:42 +05303080
3081 /* Init all UARTS as non-configured */
3082 for (i = 0; i < UARTDM_NR; i++)
3083 q_uart_port[i].uport.type = PORT_UNKNOWN;
3084
Mayank Rana55046232011-03-07 10:28:42 +05303085 ret = uart_register_driver(&msm_hs_driver);
3086 if (unlikely(ret)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003087		printk(KERN_ERR "%s failed to load\n", __func__);
3088 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303089 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003090 debug_base = debugfs_create_dir("msm_serial_hs", NULL);
3091 if (IS_ERR_OR_NULL(debug_base))
3092 pr_info("msm_serial_hs: Cannot create debugfs dir\n");
Mayank Rana55046232011-03-07 10:28:42 +05303093
3094 ret = platform_driver_register(&msm_serial_hs_platform_driver);
3095 if (ret) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003096		printk(KERN_ERR "%s failed to load\n", __func__);
3097 debugfs_remove_recursive(debug_base);
3098 uart_unregister_driver(&msm_hs_driver);
3099 return ret;
Mayank Rana55046232011-03-07 10:28:42 +05303100 }
3101
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003102 printk(KERN_INFO "msm_serial_hs module loaded\n");
Mayank Rana55046232011-03-07 10:28:42 +05303103 return ret;
3104}
Mayank Rana55046232011-03-07 10:28:42 +05303105
3106/*
3107 * Called by the upper layer when port is closed.
3108 * - Disables the port
3109 * - Unhook the ISR
3110 */
3111static void msm_hs_shutdown(struct uart_port *uport)
3112{
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303113 int ret;
3114 unsigned int data;
3115 unsigned long flags;
Mayank Rana55046232011-03-07 10:28:42 +05303116 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
Mayank Rana40836782012-11-16 14:45:47 +05303117 struct platform_device *pdev = to_platform_device(uport->dev);
3118 const struct msm_serial_hs_platform_data *pdata =
3119 pdev->dev.platform_data;
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303120 struct msm_hs_tx *tx = &msm_uport->tx;
3121 struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
Mayank Rana55046232011-03-07 10:28:42 +05303122
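	/*
	 * If a TX transfer is still in flight, stop it: ADM (data mover)
	 * ports flush the DMA channel, BAM ports disconnect the TX pipe.
	 */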
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303123 if (msm_uport->tx.dma_in_flight) {
Saket Saurabhcbf6c522013-01-07 16:30:37 +05303124 if (!is_blsp_uart(msm_uport)) {
3125 spin_lock_irqsave(&uport->lock, flags);
3126 /* disable UART TX interface to DM */
3127 data = msm_hs_read(uport, UARTDM_DMEN_ADDR);
3128 data &= ~UARTDM_TX_DM_EN_BMSK;
3129 msm_hs_write(uport, UARTDM_DMEN_ADDR, data);
3130 /* turn OFF UART Transmitter */
3131 msm_hs_write(uport, UARTDM_CR_ADDR,
3132 UARTDM_CR_TX_DISABLE_BMSK);
3133 /* reset UART TX */
3134 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX);
3135 /* reset UART TX Error */
3136 msm_hs_write(uport, UARTDM_CR_ADDR, RESET_TX_ERROR);
3137 msm_uport->tx.flush = FLUSH_STOP;
3138 spin_unlock_irqrestore(&uport->lock, flags);
3139 /* discard flush */
3140 msm_dmov_flush(msm_uport->dma_tx_channel, 0);
3141 ret = wait_event_timeout(msm_uport->tx.wait,
3142 msm_uport->tx.flush == FLUSH_SHUTDOWN, 100);
3143 if (!ret)
3144 pr_err("%s():HSUART TX Stalls.\n", __func__);
3145 } else {
3146 /* BAM Disconnect for TX */
3147 sps_disconnect(sps_pipe_handle);
3148 }
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303149 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003150 tasklet_kill(&msm_uport->tx.tlet);
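	/*
	 * RX must already be stopped (flush >= FLUSH_STOP); wait for the
	 * RX path to reach FLUSH_SHUTDOWN before tearing it down.
	 */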
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303151 BUG_ON(msm_uport->rx.flush < FLUSH_STOP);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003152 wait_event(msm_uport->rx.wait, msm_uport->rx.flush == FLUSH_SHUTDOWN);
3153 tasklet_kill(&msm_uport->rx.tlet);
3154 cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
Mayank Ranacb589d82012-03-01 11:50:03 +05303155 flush_workqueue(msm_uport->hsuart_wq);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003156 pm_runtime_disable(uport->dev);
3157 pm_runtime_set_suspended(uport->dev);
Mayank Rana55046232011-03-07 10:28:42 +05303158
3159 /* Disable the transmitter */
3160 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_TX_DISABLE_BMSK);
3161 /* Disable the receiver */
3162 msm_hs_write(uport, UARTDM_CR_ADDR, UARTDM_CR_RX_DISABLE_BMSK);
3163
Mayank Rana55046232011-03-07 10:28:42 +05303164 msm_uport->imr_reg = 0;
3165 msm_hs_write(uport, UARTDM_IMR_ADDR, msm_uport->imr_reg);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003166 /*
3167	 * Complete all device writes before actually disabling uartclk.
3168	 * Hence mb() is required here.
3169 */
3170 mb();
Mayank Rana88d49142013-01-16 17:28:53 +05303171
3172 /* Reset PNOC Bus Scaling */
Mayank Ranae4bc7de2013-01-22 12:51:16 +05303173 msm_hs_bus_voting(msm_uport, BUS_RESET);
Mayank Rana88d49142013-01-16 17:28:53 +05303174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003175 if (msm_uport->clk_state != MSM_HS_CLK_OFF) {
Mayank Ranacb589d82012-03-01 11:50:03 +05303176 /* to balance clk_state */
3177 clk_disable_unprepare(msm_uport->clk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003178 if (msm_uport->pclk)
Mayank Ranacb589d82012-03-01 11:50:03 +05303179 clk_disable_unprepare(msm_uport->pclk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003180 wake_unlock(&msm_uport->dma_wake_lock);
3181 }
Mayank Rana55046232011-03-07 10:28:42 +05303182
Mayank Ranaaf2f0082012-05-22 10:16:02 +05303183 msm_uport->clk_state = MSM_HS_CLK_PORT_OFF;
Mayank Rana55046232011-03-07 10:28:42 +05303184 dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
3185 UART_XMIT_SIZE, DMA_TO_DEVICE);
3186
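	/* Drop the wakeup capability on the RX wakeup interrupt */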
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003187 if (use_low_power_wakeup(msm_uport))
3188 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
Mayank Rana55046232011-03-07 10:28:42 +05303189
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003190 /* Free the interrupt */
3191 free_irq(uport->irq, msm_uport);
3192 if (use_low_power_wakeup(msm_uport))
3193 free_irq(msm_uport->wakeup.irq, msm_uport);
Mayank Rana40836782012-11-16 14:45:47 +05303194
Saket Saurabhfe3b93b2013-02-04 18:44:12 +05303195 if (is_blsp_uart(msm_uport)) {
3196 msm_hs_unconfig_uart_gpios(uport);
3197 } else {
3198 if (pdata && pdata->gpio_config)
3199 if (pdata->gpio_config(0))
3200 dev_err(uport->dev, "GPIO config error\n");
3201 }
Mayank Rana55046232011-03-07 10:28:42 +05303202}
3203
3204static void __exit msm_serial_hs_exit(void)
3205{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003206 printk(KERN_INFO "msm_serial_hs module removed\n");
Mayank Rana17e0e1a2012-04-07 02:10:33 +05303207 debugfs_remove_recursive(debug_base);
Mayank Rana55046232011-03-07 10:28:42 +05303208 platform_driver_unregister(&msm_serial_hs_platform_driver);
3209 uart_unregister_driver(&msm_hs_driver);
3210}
Mayank Rana55046232011-03-07 10:28:42 +05303211
Mayank Rana55046232011-03-07 10:28:42 +05303212static int msm_hs_runtime_idle(struct device *dev)
3213{
3214 /*
3215 * returning success from idle results in runtime suspend to be
3216 * called
3217 */
3218 return 0;
3219}
3220
3221static int msm_hs_runtime_resume(struct device *dev)
3222{
3223 struct platform_device *pdev = container_of(dev, struct
3224 platform_device, dev);
3225 struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
Mayank Rana55046232011-03-07 10:28:42 +05303226 msm_hs_request_clock_on(&msm_uport->uport);
3227 return 0;
3228}
3229
3230static int msm_hs_runtime_suspend(struct device *dev)
3231{
3232 struct platform_device *pdev = container_of(dev, struct
3233 platform_device, dev);
3234 struct msm_hs_port *msm_uport = &q_uart_port[pdev->id];
Mayank Rana55046232011-03-07 10:28:42 +05303235 msm_hs_request_clock_off(&msm_uport->uport);
3236 return 0;
3237}
Mayank Rana55046232011-03-07 10:28:42 +05303238
3239static const struct dev_pm_ops msm_hs_dev_pm_ops = {
3240 .runtime_suspend = msm_hs_runtime_suspend,
3241 .runtime_resume = msm_hs_runtime_resume,
3242 .runtime_idle = msm_hs_runtime_idle,
3243};
3244
Mayank Ranaff398d02012-12-18 10:22:50 +05303245static struct of_device_id msm_hs_match_table[] = {
3246 { .compatible = "qcom,msm-hsuart-v14" },
3247 {}
3248};
3249
Mayank Rana55046232011-03-07 10:28:42 +05303250static struct platform_driver msm_serial_hs_platform_driver = {
Mayank Rana17e0e1a2012-04-07 02:10:33 +05303251 .probe = msm_hs_probe,
Mayank Rana55046232011-03-07 10:28:42 +05303252 .remove = __devexit_p(msm_hs_remove),
3253 .driver = {
3254 .name = "msm_serial_hs",
Mayank Rana55046232011-03-07 10:28:42 +05303255 .pm = &msm_hs_dev_pm_ops,
Mayank Ranaff398d02012-12-18 10:22:50 +05303256 .of_match_table = msm_hs_match_table,
Mayank Rana55046232011-03-07 10:28:42 +05303257 },
3258};
3259
3260static struct uart_driver msm_hs_driver = {
3261 .owner = THIS_MODULE,
3262 .driver_name = "msm_serial_hs",
3263 .dev_name = "ttyHS",
3264 .nr = UARTDM_NR,
3265 .cons = 0,
3266};
3267
3268static struct uart_ops msm_hs_ops = {
3269 .tx_empty = msm_hs_tx_empty,
3270 .set_mctrl = msm_hs_set_mctrl_locked,
3271 .get_mctrl = msm_hs_get_mctrl_locked,
3272 .stop_tx = msm_hs_stop_tx_locked,
3273 .start_tx = msm_hs_start_tx_locked,
3274 .stop_rx = msm_hs_stop_rx_locked,
3275 .enable_ms = msm_hs_enable_ms_locked,
3276 .break_ctl = msm_hs_break_ctl,
3277 .startup = msm_hs_startup,
3278 .shutdown = msm_hs_shutdown,
3279 .set_termios = msm_hs_set_termios,
Mayank Rana55046232011-03-07 10:28:42 +05303280 .type = msm_hs_type,
3281 .config_port = msm_hs_config_port,
3282 .release_port = msm_hs_release_port,
3283 .request_port = msm_hs_request_port,
Saket Saurabhce394102012-10-29 19:51:28 +05303284 .flush_buffer = msm_hs_flush_buffer,
Mayank Rana55046232011-03-07 10:28:42 +05303285};
3286
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003287module_init(msm_serial_hs_init);
3288module_exit(msm_serial_hs_exit);
Mayank Rana55046232011-03-07 10:28:42 +05303289MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
3290MODULE_VERSION("1.2");
3291MODULE_LICENSE("GPL v2");