blob: 6a05d5b84661d58cc6e4da5fd17a5c80b05c95d7 [file] [log] [blame]
Mukesh Kumar Savaliya4305aad2017-09-03 02:09:07 +05301/* drivers/serial/msm_serial_hs.c
2 *
3 * MSM 7k High speed uart driver
4 *
5 * Copyright (c) 2008 Google Inc.
6 * Copyright (c) 2007-2017, The Linux Foundation. All rights reserved.
7 * Modified: Nick Pelly <npelly@google.com>
8 *
9 * All source code in this file is licensed under the following license
10 * except where indicated.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19 * See the GNU General Public License for more details.
20 *
21 * Has optional support for uart power management independent of linux
22 * suspend/resume:
23 *
24 * RX wakeup.
25 * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
26 * UART RX pin). This should only be used if there is not a wakeup
27 * GPIO on the UART CTS, and the first RX byte is known (for example, with the
28 * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
29 * always be lost. RTS will be asserted even while the UART is off in this mode
30 * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
31 */
32
33#include <linux/module.h>
34
35#include <linux/serial.h>
36#include <linux/serial_core.h>
37#include <linux/slab.h>
38#include <linux/init.h>
39#include <linux/interrupt.h>
40#include <linux/irq.h>
41#include <linux/io.h>
42#include <linux/ioport.h>
43#include <linux/atomic.h>
44#include <linux/kernel.h>
45#include <linux/timer.h>
46#include <linux/clk.h>
47#include <linux/delay.h>
48#include <linux/platform_device.h>
49#include <linux/pm_runtime.h>
50#include <linux/dma-mapping.h>
51#include <linux/tty_flip.h>
52#include <linux/wait.h>
53#include <linux/sysfs.h>
54#include <linux/stat.h>
55#include <linux/device.h>
56#include <linux/wakelock.h>
57#include <linux/debugfs.h>
58#include <linux/of.h>
59#include <linux/of_device.h>
60#include <linux/of_gpio.h>
61#include <linux/gpio.h>
62#include <linux/ipc_logging.h>
63#include <asm/irq.h>
64#include <linux/kthread.h>
65
66#include <linux/msm-sps.h>
67#include <linux/platform_data/msm_serial_hs.h>
68#include <linux/msm-bus.h>
69
70#include "msm_serial_hs_hwreg.h"
71#define UART_SPS_CONS_PERIPHERAL 0
72#define UART_SPS_PROD_PERIPHERAL 1
73
74#define IPC_MSM_HS_LOG_STATE_PAGES 2
75#define IPC_MSM_HS_LOG_USER_PAGES 2
76#define IPC_MSM_HS_LOG_DATA_PAGES 3
77#define UART_DMA_DESC_NR 8
78#define BUF_DUMP_SIZE 32
79
80/* If the debug_mask gets set to FATAL_LEV,
81 * a fatal error has happened and further IPC logging
82 * is disabled so that this problem can be detected
83 */
84enum {
85 FATAL_LEV = 0U,
86 ERR_LEV = 1U,
87 WARN_LEV = 2U,
88 INFO_LEV = 3U,
89 DBG_LEV = 4U,
90};
91
92#define MSM_HS_DBG(x...) do { \
93 if (msm_uport->ipc_debug_mask >= DBG_LEV) { \
94 if (msm_uport->ipc_msm_hs_log_ctxt) \
95 ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
96 } \
97} while (0)
98
99#define MSM_HS_INFO(x...) do { \
100 if (msm_uport->ipc_debug_mask >= INFO_LEV) {\
101 if (msm_uport->ipc_msm_hs_log_ctxt) \
102 ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
103 } \
104} while (0)
105
106/* warnings and errors show up on console always */
107#define MSM_HS_WARN(x...) do { \
108 pr_warn(x); \
109 if (msm_uport->ipc_msm_hs_log_ctxt && \
110 msm_uport->ipc_debug_mask >= WARN_LEV) \
111 ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
112} while (0)
113
/* An ERROR condition in the driver sets ipc_debug_mask to FATAL_LEV,
 * so that the first error message is captured in IPC logging and
 * further IPC logging is then disabled. Errors continue to log on
 * the console.
 */
118#define MSM_HS_ERR(x...) do { \
119 pr_err(x); \
120 if (msm_uport->ipc_msm_hs_log_ctxt && \
121 msm_uport->ipc_debug_mask >= ERR_LEV) { \
122 ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
123 msm_uport->ipc_debug_mask = FATAL_LEV; \
124 } \
125} while (0)
126
127#define LOG_USR_MSG(ctx, x...) do { \
128 if (ctx) \
129 ipc_log_string(ctx, x); \
130} while (0)
131
132/*
133 * There are 3 different kind of UART Core available on MSM.
134 * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
135 * and BSLP based HSUART.
136 */
137enum uart_core_type {
138 LEGACY_HSUART,
139 GSBI_HSUART,
140 BLSP_HSUART,
141};
142
143enum flush_reason {
144 FLUSH_NONE,
145 FLUSH_DATA_READY,
146 FLUSH_DATA_INVALID, /* values after this indicate invalid data */
147 FLUSH_IGNORE,
148 FLUSH_STOP,
149 FLUSH_SHUTDOWN,
150};
151
152/*
153 * SPS data structures to support HSUART with BAM
154 * @sps_pipe - This struct defines BAM pipe descriptor
155 * @sps_connect - This struct defines a connection's end point
156 * @sps_register - This struct defines a event registration parameters
157 */
158struct msm_hs_sps_ep_conn_data {
159 struct sps_pipe *pipe_handle;
160 struct sps_connect config;
161 struct sps_register_event event;
162};
163
/* Per-port TX state: BAM consumer endpoint, worker thread and flush state */
struct msm_hs_tx {
	bool dma_in_flight;    /* tx dma in progress */
	enum flush_reason flush;	/* TX flush state machine */
	wait_queue_head_t wait;
	int tx_count;
	dma_addr_t dma_base;	/* DMA base address of the TX buffer */
	struct kthread_work kwork;	/* TX work item */
	struct kthread_worker kworker;
	struct task_struct *task;	/* kthread running the TX kworker */
	struct msm_hs_sps_ep_conn_data cons;	/* consumer (TX) BAM pipe */
	struct timer_list tx_timeout_timer;
	void *ipc_tx_ctxt;	/* IPC logging context for TX */
};
177
/* Per-port RX state: BAM producer endpoint, DMA buffer and descriptors */
struct msm_hs_rx {
	enum flush_reason flush;	/* RX flush state machine */
	wait_queue_head_t wait;
	dma_addr_t rbuffer;	/* DMA address of the RX buffer */
	unsigned char *buffer;	/* CPU address of the RX buffer */
	unsigned int buffer_pending;	/* enum buffer_states bits */
	struct delayed_work flip_insert_work;
	struct kthread_work kwork;	/* RX work item */
	struct kthread_worker kworker;
	struct task_struct *task;	/* kthread running the RX kworker */
	struct msm_hs_sps_ep_conn_data prod;	/* producer (RX) BAM pipe */
	/* NOTE(review): presumably one bit per RX descriptor — confirm
	 * against the RX work handler (not visible in this chunk).
	 */
	unsigned long queued_flag;
	unsigned long pending_flag;
	int rx_inx;	/* index of the descriptor currently processed */
	struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
	void *ipc_rx_ctxt;	/* IPC logging context for RX */
};
195enum buffer_states {
196 NONE_PENDING = 0x0,
197 FIFO_OVERRUN = 0x1,
198 PARITY_ERROR = 0x2,
199 CHARS_NORMAL = 0x4,
200};
201
202enum msm_hs_pm_state {
203 MSM_HS_PM_ACTIVE,
204 MSM_HS_PM_SUSPENDED,
205 MSM_HS_PM_SYS_SUSPENDED,
206};
207
208/* optional low power wakeup, typically on a GPIO RX irq */
struct msm_hs_wakeup {
	int irq;  /* < 0 indicates low power wakeup disabled */
	unsigned char ignore;  /* bool */

	/* bool: inject char into rx tty on wakeup */
	bool inject_rx;
	/* char injected on wakeup (first RX byte is lost, see file header) */
	unsigned char rx_to_inject;
	/* NOTE(review): presumably track enable_irq/free_irq state — confirm */
	bool enabled;
	bool freed;
};
219
/*
 * struct msm_hs_port - per-port driver state for one high-speed UART.
 * Embeds the core uart_port plus clocks, BAM/SPS endpoint state,
 * runtime-PM bookkeeping and IPC-logging contexts.
 */
struct msm_hs_port {
	struct uart_port uport;
	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
	struct clk *clk;	/* core clock */
	struct clk *pclk;	/* interface clock; may be NULL */
	struct msm_hs_tx tx;
	struct msm_hs_rx rx;
	atomic_t resource_count;	/* outstanding resource votes */
	struct msm_hs_wakeup wakeup;

	struct dentry *loopback_dir;	/* debugfs loopback.<id> node */
	struct work_struct clock_off_w; /* work for actual clock off */
	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
	struct mutex mtx; /* resource access mutex */
	enum uart_core_type uart_type;
	unsigned long bam_handle;
	resource_size_t bam_mem;
	int bam_irq;
	unsigned char __iomem *bam_base;
	unsigned int bam_tx_ep_pipe_index;
	unsigned int bam_rx_ep_pipe_index;
	/* struct sps_event_notify is an argument passed when triggering a
	 * callback event object registered for an SPS connection end point.
	 */
	struct sps_event_notify notify;
	/* bus client handler */
	u32 bus_perf_client;
	/* BLSP UART required BUS Scaling data */
	struct msm_bus_scale_pdata *bus_scale_table;
	bool rx_bam_inprogress;
	wait_queue_head_t bam_disconnect_wait;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *gpio_state_active;
	struct pinctrl_state *gpio_state_suspend;
	bool flow_control;	/* auto-RFR flow control enabled */
	enum msm_hs_pm_state pm_state;
	atomic_t client_count;
	bool obs; /* out of band sleep flag */
	atomic_t client_req_state;
	void *ipc_msm_hs_log_ctxt;	/* main IPC log context */
	void *ipc_msm_hs_pwr_ctxt;	/* power-event IPC log context */
	int ipc_debug_mask;	/* FATAL_LEV..DBG_LEV, see enum above */
};
264
265static const struct of_device_id msm_hs_match_table[] = {
266 { .compatible = "qcom,msm-hsuart-v14"},
267 {}
268};
269
270
271#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
272#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
273#define UARTDM_RX_BUF_SIZE 512
274#define RETRY_TIMEOUT 5
275#define UARTDM_NR 256
276#define BAM_PIPE_MIN 0
277#define BAM_PIPE_MAX 11
278#define BUS_SCALING 1
279#define BUS_RESET 0
280#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
281#define BLSP_UART_CLK_FMAX 63160000
282
283static struct dentry *debug_base;
284static struct platform_driver msm_serial_hs_platform_driver;
285static struct uart_driver msm_hs_driver;
286static const struct uart_ops msm_hs_ops;
287static void msm_hs_start_rx_locked(struct uart_port *uport);
288static void msm_serial_hs_rx_work(struct kthread_work *work);
289static void flip_insert_work(struct work_struct *work);
290static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
291static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
292static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport);
293static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport);
294static int msm_hs_pm_resume(struct device *dev);
295
296#define UARTDM_TO_MSM(uart_port) \
297 container_of((uart_port), struct msm_hs_port, uport)
298
299static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
300 unsigned long arg)
301{
302 int ret = 0, state = 1;
303 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
304
305 if (!msm_uport)
306 return -ENODEV;
307
308 switch (cmd) {
309 case MSM_ENABLE_UART_CLOCK: {
310 ret = msm_hs_request_clock_on(&msm_uport->uport);
311 break;
312 }
313 case MSM_DISABLE_UART_CLOCK: {
314 ret = msm_hs_request_clock_off(&msm_uport->uport);
315 break;
316 }
317 case MSM_GET_UART_CLOCK_STATUS: {
318 /* Return value 0 - UART CLOCK is OFF
319 * Return value 1 - UART CLOCK is ON
320 */
321
322 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
323 state = 0;
324 ret = state;
325 MSM_HS_INFO("%s():GET UART CLOCK STATUS: cmd=%d state=%d\n",
326 __func__, cmd, state);
327 break;
328 }
329 default: {
330 MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
331 cmd);
332 ret = -ENOIOCTLCMD;
333 break;
334 }
335 }
336
337 return ret;
338}
339
340/*
341 * This function is called initially during probe and then
342 * through the runtime PM framework. The function directly calls
343 * resource APIs to enable them.
344 */
345
static int msm_hs_clk_bus_vote(struct msm_hs_port *msm_uport)
{
	int rc = 0;

	/* Vote for bus bandwidth first so register/DMA traffic can flow */
	msm_hs_bus_voting(msm_uport, BUS_SCALING);
	/* Turn on core clk and iface clk */
	if (msm_uport->pclk) {
		rc = clk_prepare_enable(msm_uport->pclk);
		if (rc) {
			dev_err(msm_uport->uport.dev,
				"%s: Could not turn on pclk [%d]\n",
				__func__, rc);
			goto busreset;
		}
	}
	rc = clk_prepare_enable(msm_uport->clk);
	if (rc) {
		dev_err(msm_uport->uport.dev,
			"%s: Could not turn on core clk [%d]\n",
			__func__, rc);
		goto core_unprepare;
	}
	MSM_HS_DBG("%s: Clock ON successful\n", __func__);
	return rc;
/* Note: despite its name this label unwinds the *pclk* enable; a NULL
 * pclk is a no-op for clk_disable_unprepare().
 */
core_unprepare:
	clk_disable_unprepare(msm_uport->pclk);
busreset:
	msm_hs_bus_voting(msm_uport, BUS_RESET);
	return rc;
}
376
377/*
378 * This function is called initially during probe and then
379 * through the runtime PM framework. The function directly calls
380 * resource apis to disable them.
381 */
static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
{
	/* Reverse order of msm_hs_clk_bus_vote(): clocks off, then bus */
	clk_disable_unprepare(msm_uport->clk);
	if (msm_uport->pclk)
		clk_disable_unprepare(msm_uport->pclk);
	msm_hs_bus_voting(msm_uport, BUS_RESET);
	MSM_HS_DBG("%s: Clock OFF successful\n", __func__);
}
390
391 /* Remove vote for resources when done */
392static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
393{
394 struct uart_port *uport = &(msm_uport->uport);
395 int rc = atomic_read(&msm_uport->resource_count);
396
397 MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
398 if (rc <= 0) {
399 MSM_HS_WARN("%s(): rc zero, bailing\n", __func__);
400 WARN_ON(1);
401 return;
402 }
403 atomic_dec(&msm_uport->resource_count);
404 pm_runtime_mark_last_busy(uport->dev);
405 pm_runtime_put_autosuspend(uport->dev);
406}
407
408 /* Vote for resources before accessing them */
static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
{
	int ret;
	struct uart_port *uport = &(msm_uport->uport);

	ret = pm_runtime_get_sync(uport->dev);
	/* If the runtime-PM resume callback did not leave the port ACTIVE
	 * (error return or pm_state still suspended), force the resume
	 * path directly so the caller can safely touch the hardware.
	 */
	if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_WARN("%s:%s runtime callback not invoked ret:%d st:%d",
			__func__, dev_name(uport->dev), ret,
			msm_uport->pm_state);
		msm_hs_pm_resume(uport->dev);
	}
	atomic_inc(&msm_uport->resource_count);
}
423
/* Check if the uport line number matches the user id stored in pdata.
 * User id information is stored during initialization. This function
 * ensures that the same device is selected.
 */
428
429static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
430{
431 struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
432 struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
433
434 if ((!msm_uport) || (msm_uport->uport.line != pdev->id
435 && msm_uport->uport.line != pdata->userid)) {
436 pr_err("uport line number mismatch!");
437 WARN_ON(1);
438 return NULL;
439 }
440
441 return msm_uport;
442}
443
444static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
445 char *buf)
446{
447 int state = 1;
448 ssize_t ret = 0;
449 struct platform_device *pdev = container_of(dev, struct
450 platform_device, dev);
451 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
452
453 /* This check should not fail */
454 if (msm_uport) {
455 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
456 state = 0;
457 ret = snprintf(buf, PAGE_SIZE, "%d\n", state);
458 }
459 return ret;
460}
461
462static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
463 const char *buf, size_t count)
464{
465 int state;
466 ssize_t ret = 0;
467 struct platform_device *pdev = container_of(dev, struct
468 platform_device, dev);
469 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
470
471 /* This check should not fail */
472 if (msm_uport) {
473 state = buf[0] - '0';
474 switch (state) {
475 case 0:
476 MSM_HS_DBG("%s: Request clock OFF\n", __func__);
477 msm_hs_request_clock_off(&msm_uport->uport);
478 ret = count;
479 break;
480 case 1:
481 MSM_HS_DBG("%s: Request clock ON\n", __func__);
482 msm_hs_request_clock_on(&msm_uport->uport);
483 ret = count;
484 break;
485 default:
486 ret = -EINVAL;
487 }
488 }
489 return ret;
490}
491
492static DEVICE_ATTR(clock, 0644, show_clock, set_clock);
493
494static ssize_t show_debug_mask(struct device *dev,
495 struct device_attribute *attr, char *buf)
496{
497 ssize_t ret = 0;
498 struct platform_device *pdev = container_of(dev, struct
499 platform_device, dev);
500 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
501
502 /* This check should not fail */
503 if (msm_uport)
504 ret = snprintf(buf, sizeof(int), "%u\n",
505 msm_uport->ipc_debug_mask);
506 return ret;
507}
508
/* sysfs "debug_mask" write: sets the IPC debug level; valid range 0..4,
 * out-of-range input falls back to INFO_LEV and returns -EINVAL.
 */
static ssize_t set_debug_mask(struct device *dev,
			struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct platform_device *pdev = container_of(dev, struct
						platform_device, dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);

	/* This check should not fail */
	if (msm_uport) {
		/* Only the first character is parsed; expected '0'..'4' */
		msm_uport->ipc_debug_mask = buf[0] - '0';
		if (msm_uport->ipc_debug_mask < FATAL_LEV ||
				msm_uport->ipc_debug_mask > DBG_LEV) {
			/* set to default level */
			msm_uport->ipc_debug_mask = INFO_LEV;
			MSM_HS_ERR("Range is 0 to 4;Set to default level 3\n");
			return -EINVAL;
		}
	}
	return count;
}
530
531static DEVICE_ATTR(debug_mask, 0644, show_debug_mask,
532 set_debug_mask);
533
/* True when an RX-wakeup interrupt has been configured (irq > 0) */
static inline bool is_use_low_power_wakeup(struct msm_hs_port *msm_uport)
{
	return msm_uport->wakeup.irq > 0;
}
538
539static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
540{
541 int ret;
542
543 if (msm_uport->bus_perf_client) {
544 MSM_HS_DBG("Bus voting:%d\n", vote);
545 ret = msm_bus_scale_client_update_request(
546 msm_uport->bus_perf_client, vote);
547 if (ret)
548 MSM_HS_ERR("%s(): Failed for Bus voting: %d\n",
549 __func__, vote);
550 }
551}
552
/* Read a UARTDM register at byte offset @index (relaxed, no barrier) */
static inline unsigned int msm_hs_read(struct uart_port *uport,
		unsigned int index)
{
	return readl_relaxed(uport->membase + index);
}
558
/* Write @value to the UARTDM register at byte offset @index (relaxed;
 * callers issue mb() where ordering matters).
 */
static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
		unsigned int value)
{
	writel_relaxed(value, uport->membase + index);
}
564
565static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
566{
567 struct sps_connect config;
568 int ret;
569
570 ret = sps_get_config(sps_pipe_handler, &config);
571 if (ret) {
572 pr_err("%s: sps_get_config() failed ret %d\n", __func__, ret);
573 return ret;
574 }
575 config.options |= SPS_O_POLL;
576 ret = sps_set_config(sps_pipe_handler, &config);
577 if (ret) {
578 pr_err("%s: sps_set_config() failed ret %d\n", __func__, ret);
579 return ret;
580 }
581 return sps_disconnect(sps_pipe_handler);
582}
583
/*
 * hex_dump_ipc() - log a bounded hex dump of a buffer to an IPC context.
 * @msm_uport: port (unused here; logging goes through @ipc_ctx)
 * @ipc_ctx: ipc_logging context to write into
 * @prefix: short tag prepended to the log line
 * @string: data to dump
 * @addr: address logged alongside the data (truncated to 32 bits)
 * @size: number of bytes available; at most BUF_DUMP_SIZE are dumped
 */
static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
			char *prefix, char *string, u64 addr, int size)

{
	/* 3 chars per byte ("xx ") plus terminator slack */
	char buf[(BUF_DUMP_SIZE * 3) + 2];
	int len = 0;

	len = min(size, BUF_DUMP_SIZE);
	/*
	 * Print up to 32 data bytes, 32 bytes per line, 1 byte at a time and
	 * don't include the ASCII text at the end of the buffer.
	 */
	hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
	ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
					(unsigned int)addr, size, buf);
}
600
601/*
602 * This API read and provides UART Core registers information.
603 */
static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);

	/* Registers are unreadable with the clocks off; bail out early */
	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_INFO("%s:Failed clocks are off, resource_count %d",
			__func__, atomic_read(&msm_uport->resource_count));
		return;
	}

	/* Configuration registers at DBG level */
	MSM_HS_DBG(
	"MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
	msm_hs_read(uport, UART_DM_MR1),
	msm_hs_read(uport, UART_DM_MR2),
	msm_hs_read(uport, UART_DM_TFWR),
	msm_hs_read(uport, UART_DM_RFWR),
	msm_hs_read(uport, UART_DM_DMEN),
	msm_hs_read(uport, UART_DM_IMR),
	msm_hs_read(uport, UART_DM_MISR),
	msm_hs_read(uport, UART_DM_NCF_TX));
	/* Status/FIFO registers at INFO level */
	MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
	msm_hs_read(uport, UART_DM_SR),
	msm_hs_read(uport, UART_DM_ISR),
	msm_hs_read(uport, UART_DM_DMRX),
	msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
	msm_hs_read(uport, UART_DM_TXFS),
	msm_hs_read(uport, UART_DM_RXFS));
	MSM_HS_DBG("rx.flush:%u\n", msm_uport->rx.flush);
}
633
634static int msm_serial_loopback_enable_set(void *data, u64 val)
635{
636 struct msm_hs_port *msm_uport = data;
637 struct uart_port *uport = &(msm_uport->uport);
638 unsigned long flags;
639 int ret = 0;
640
641 msm_hs_resource_vote(msm_uport);
642
643 if (val) {
644 spin_lock_irqsave(&uport->lock, flags);
645 ret = msm_hs_read(uport, UART_DM_MR2);
646 ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
647 UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
648 msm_hs_write(uport, UART_DM_MR2, ret);
649 spin_unlock_irqrestore(&uport->lock, flags);
650 } else {
651 spin_lock_irqsave(&uport->lock, flags);
652 ret = msm_hs_read(uport, UART_DM_MR2);
653 ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
654 UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
655 msm_hs_write(uport, UART_DM_MR2, ret);
656 spin_unlock_irqrestore(&uport->lock, flags);
657 }
658 /* Calling CLOCK API. Hence mb() requires here. */
659 mb();
660
661 msm_hs_resource_unvote(msm_uport);
662 return 0;
663}
664
665static int msm_serial_loopback_enable_get(void *data, u64 *val)
666{
667 struct msm_hs_port *msm_uport = data;
668 struct uart_port *uport = &(msm_uport->uport);
669 unsigned long flags;
670 int ret = 0;
671
672 msm_hs_resource_vote(msm_uport);
673
674 spin_lock_irqsave(&uport->lock, flags);
675 ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
676 spin_unlock_irqrestore(&uport->lock, flags);
677
678 msm_hs_resource_unvote(msm_uport);
679
680 *val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
681
682 return 0;
683}
684DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
685 msm_serial_loopback_enable_set, "%llu\n");
686
687/*
688 * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
689 * writing 1 turns on internal loopback mode in HW. Useful for automation
690 * test scripts.
691 * writing 0 disables the internal loopback mode. Default is disabled.
692 */
static void msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
					int id)
{
	/* "loopback." + up to 5 digit id + NUL fits in 15 bytes */
	char node_name[15];

	snprintf(node_name, sizeof(node_name), "loopback.%d", id);
	/* Failure is non-fatal: the port works without the debugfs node */
	msm_uport->loopback_dir = debugfs_create_file(node_name,
						0644,
						debug_base,
						msm_uport,
						&loopback_enable_fops);

	if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
		MSM_HS_ERR("%s(): Cannot create loopback.%d debug entry",
							__func__, id);
}
709
710static int msm_hs_remove(struct platform_device *pdev)
711{
712
713 struct msm_hs_port *msm_uport;
714 struct device *dev;
715
716 if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
717 pr_err("Invalid plaform device ID = %d\n", pdev->id);
718 return -EINVAL;
719 }
720
721 msm_uport = get_matching_hs_port(pdev);
722 if (!msm_uport)
723 return -EINVAL;
724
725 dev = msm_uport->uport.dev;
726 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
727 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_debug_mask.attr);
728 debugfs_remove(msm_uport->loopback_dir);
729
730 dma_free_coherent(msm_uport->uport.dev,
731 UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
732 msm_uport->rx.buffer, msm_uport->rx.rbuffer);
733
734 msm_uport->rx.buffer = NULL;
735 msm_uport->rx.rbuffer = 0;
736
737 destroy_workqueue(msm_uport->hsuart_wq);
738 mutex_destroy(&msm_uport->mtx);
739
740 uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
741 clk_put(msm_uport->clk);
742 if (msm_uport->pclk)
743 clk_put(msm_uport->pclk);
744
745 iounmap(msm_uport->uport.membase);
746
747 return 0;
748}
749
750
751/* Connect a UART peripheral's SPS endpoint(consumer endpoint)
752 *
753 * Also registers a SPS callback function for the consumer
754 * process with the SPS driver
755 *
756 * @uport - Pointer to uart uport structure
757 *
758 * @return - 0 if successful else negative value.
759 *
760 */
761
762static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
763{
764 int ret;
765 struct uart_port *uport = &msm_uport->uport;
766 struct msm_hs_tx *tx = &msm_uport->tx;
767 struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
768 struct sps_connect *sps_config = &tx->cons.config;
769 struct sps_register_event *sps_event = &tx->cons.event;
770 unsigned long flags;
771 unsigned int data;
772
773 if (tx->flush != FLUSH_SHUTDOWN) {
774 MSM_HS_ERR("%s:Invalid flush state:%d\n", __func__, tx->flush);
775 return 0;
776 }
777
778 /* Establish connection between peripheral and memory endpoint */
779 ret = sps_connect(sps_pipe_handle, sps_config);
780 if (ret) {
781 MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
782 "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
783 return ret;
784 }
785 /* Register callback event for EOT (End of transfer) event. */
786 ret = sps_register_event(sps_pipe_handle, sps_event);
787 if (ret) {
788 MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
789 "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
790 goto reg_event_err;
791 }
792
793 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
794 msm_uport->tx.flush = FLUSH_STOP;
795 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
796
797 data = msm_hs_read(uport, UART_DM_DMEN);
798 /* Enable UARTDM Tx BAM Interface */
799 data |= UARTDM_TX_BAM_ENABLE_BMSK;
800 msm_hs_write(uport, UART_DM_DMEN, data);
801
802 msm_hs_write(uport, UART_DM_CR, RESET_TX);
803 msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
804 msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
805
806 MSM_HS_DBG("%s(): TX Connect", __func__);
807 return 0;
808
809reg_event_err:
810 sps_disconnect(sps_pipe_handle);
811 return ret;
812}
813
814/* Connect a UART peripheral's SPS endpoint(producer endpoint)
815 *
816 * Also registers a SPS callback function for the producer
817 * process with the SPS driver
818 *
819 * @uport - Pointer to uart uport structure
820 *
821 * @return - 0 if successful else negative value.
822 *
823 */
824
825static int msm_hs_spsconnect_rx(struct uart_port *uport)
826{
827 int ret;
828 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
829 struct msm_hs_rx *rx = &msm_uport->rx;
830 struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
831 struct sps_connect *sps_config = &rx->prod.config;
832 struct sps_register_event *sps_event = &rx->prod.event;
833 unsigned long flags;
834
835 /* Establish connection between peripheral and memory endpoint */
836 ret = sps_connect(sps_pipe_handle, sps_config);
837 if (ret) {
838 MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
839 "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
840 return ret;
841 }
842 /* Register callback event for DESC_DONE event. */
843 ret = sps_register_event(sps_pipe_handle, sps_event);
844 if (ret) {
845 MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
846 "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
847 goto reg_event_err;
848 }
849 spin_lock_irqsave(&uport->lock, flags);
850 if (msm_uport->rx.pending_flag)
851 MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
852 __func__, msm_uport->rx.pending_flag);
853 msm_uport->rx.queued_flag = 0;
854 msm_uport->rx.pending_flag = 0;
855 msm_uport->rx.rx_inx = 0;
856 msm_uport->rx.flush = FLUSH_STOP;
857 spin_unlock_irqrestore(&uport->lock, flags);
858 MSM_HS_DBG("%s(): RX Connect\n", __func__);
859 return 0;
860
861reg_event_err:
862 sps_disconnect(sps_pipe_handle);
863 return ret;
864}
865
866/*
867 * programs the UARTDM_CSR register with correct bit rates
868 *
869 * Interrupts should be disabled before we are called, as
870 * we modify Set Baud rate
871 * Set receive stale interrupt level, dependent on Bit Rate
872 * Goal is to have around 8 ms before indicate stale.
873 * roundup (((Bit Rate * .008) / 10) + 1
874 */
static void msm_hs_set_bps_locked(struct uart_port *uport,
			       unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Each case programs the CSR divider pair and picks a stale
	 * timeout roughly proportional to the bit rate (see comment above).
	 */
	switch (bps) {
	case 300:
		msm_hs_write(uport, UART_DM_CSR, 0x00);
		rxstale = 1;
		break;
	case 600:
		msm_hs_write(uport, UART_DM_CSR, 0x11);
		rxstale = 1;
		break;
	case 1200:
		msm_hs_write(uport, UART_DM_CSR, 0x22);
		rxstale = 1;
		break;
	case 2400:
		msm_hs_write(uport, UART_DM_CSR, 0x33);
		rxstale = 1;
		break;
	case 4800:
		msm_hs_write(uport, UART_DM_CSR, 0x44);
		rxstale = 1;
		break;
	case 9600:
		msm_hs_write(uport, UART_DM_CSR, 0x55);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UART_DM_CSR, 0x66);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UART_DM_CSR, 0x77);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UART_DM_CSR, 0x88);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UART_DM_CSR, 0x99);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UART_DM_CSR, 0xaa);
		rxstale = 16;
		break;
	case 76800:
		msm_hs_write(uport, UART_DM_CSR, 0xbb);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UART_DM_CSR, 0xcc);
		rxstale = 31;
		break;
	case 230400:
		msm_hs_write(uport, UART_DM_CSR, 0xee);
		rxstale = 31;
		break;
	case 460800:
		msm_hs_write(uport, UART_DM_CSR, 0xff);
		rxstale = 31;
		break;
	/* Rates above 460800 all use CSR 0xff; the actual rate comes
	 * from retuning the core clock below (uartclk = bps * 16).
	 */
	case 4000000:
	case 3686400:
	case 3200000:
	case 3500000:
	case 3000000:
	case 2500000:
	case 2000000:
	case 1500000:
	case 1152000:
	case 1000000:
	case 921600:
		msm_hs_write(uport, UART_DM_CSR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UART_DM_CSR, 0xff);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}
	/*
	 * uart baud rate depends on CSR and MND Values
	 * we are updating CSR before and then calling
	 * clk_set_rate which updates MND Values. Hence
	 * dsb requires here.
	 */
	mb();
	if (bps > 460800) {
		uport->uartclk = bps * 16;
		/* BLSP based UART supports maximum clock frequency
		 * of 63.16 Mhz. With this (63.16 Mhz) clock frequency
		 * UART can support baud rate of 3.94 Mbps which is
		 * equivalent to 4 Mbps.
		 * UART hardware is robust enough to handle this
		 * deviation to achieve baud rate ~4 Mbps.
		 */
		if (bps == 4000000)
			uport->uartclk = BLSP_UART_CLK_FMAX;
	} else {
		uport->uartclk = 7372800;
	}

	if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
		MSM_HS_WARN("Error setting clock rate on UART\n");
		WARN_ON(1);
	}

	/* Stale timeout: low bits in STALE_LSB, high bits shifted into MSB */
	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UART_DM_IPR, data);
	/*
	 * It is suggested to do reset of transmitter and receiver after
	 * changing any protocol configuration. Here Baud rate and stale
	 * timeout are getting updated. Hence reset transmitter and receiver.
	 */
	msm_hs_write(uport, UART_DM_CR, RESET_TX);
	msm_hs_write(uport, UART_DM_CR, RESET_RX);
}
1003
1004
/*
 * Like msm_hs_set_bps_locked() but for the "standard" rate table:
 * only programs CSR and the stale timeout, does not retune the core
 * clock and does not reset TX/RX.
 */
static void msm_hs_set_std_bps_locked(struct uart_port *uport,
			       unsigned int bps)
{
	unsigned long rxstale;
	unsigned long data;

	switch (bps) {
	case 9600:
		msm_hs_write(uport, UART_DM_CSR, 0x99);
		rxstale = 2;
		break;
	case 14400:
		msm_hs_write(uport, UART_DM_CSR, 0xaa);
		rxstale = 3;
		break;
	case 19200:
		msm_hs_write(uport, UART_DM_CSR, 0xbb);
		rxstale = 4;
		break;
	case 28800:
		msm_hs_write(uport, UART_DM_CSR, 0xcc);
		rxstale = 6;
		break;
	case 38400:
		msm_hs_write(uport, UART_DM_CSR, 0xdd);
		rxstale = 8;
		break;
	case 57600:
		msm_hs_write(uport, UART_DM_CSR, 0xee);
		rxstale = 16;
		break;
	case 115200:
		msm_hs_write(uport, UART_DM_CSR, 0xff);
		rxstale = 31;
		break;
	default:
		msm_hs_write(uport, UART_DM_CSR, 0x99);
		/* default to 9600 */
		bps = 9600;
		rxstale = 2;
		break;
	}

	/* Stale timeout: low bits in STALE_LSB, high bits shifted into MSB */
	data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
	data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);

	msm_hs_write(uport, UART_DM_IPR, data);
}
1053
1054static void msm_hs_enable_flow_control(struct uart_port *uport, bool override)
1055{
1056 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1057 unsigned int data;
1058
1059 if (msm_uport->flow_control || override) {
1060 /* Enable RFR line */
1061 msm_hs_write(uport, UART_DM_CR, RFR_LOW);
1062 /* Enable auto RFR */
1063 data = msm_hs_read(uport, UART_DM_MR1);
1064 data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
1065 msm_hs_write(uport, UART_DM_MR1, data);
1066 /* Ensure register IO completion */
1067 mb();
1068 }
1069}
1070
1071static void msm_hs_disable_flow_control(struct uart_port *uport, bool override)
1072{
1073 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1074 unsigned int data;
1075
1076 /*
1077 * Clear the Rx Ready Ctl bit - This ensures that
1078 * flow control lines stop the other side from sending
1079 * data while we change the parameters
1080 */
1081
1082 if (msm_uport->flow_control || override) {
1083 data = msm_hs_read(uport, UART_DM_MR1);
1084 /* disable auto ready-for-receiving */
1085 data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
1086 msm_hs_write(uport, UART_DM_MR1, data);
1087 /* Disable RFR line */
1088 msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
1089 /* Ensure register IO completion */
1090 mb();
1091 }
1092}
1093
1094/*
1095 * termios : new ktermios
1096 * oldtermios: old ktermios previous setting
1097 *
1098 * Configure the serial port
1099 */
1100static void msm_hs_set_termios(struct uart_port *uport,
1101 struct ktermios *termios,
1102 struct ktermios *oldtermios)
1103{
1104 unsigned int bps;
1105 unsigned long data;
1106 unsigned int c_cflag = termios->c_cflag;
1107 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1108
1109 /**
1110 * set_termios can be invoked from the framework when
1111 * the clocks are off and the client has not had a chance
1112 * to turn them on. Make sure that they are on
1113 */
1114 msm_hs_resource_vote(msm_uport);
1115 mutex_lock(&msm_uport->mtx);
1116 msm_hs_write(uport, UART_DM_IMR, 0);
1117
1118 msm_hs_disable_flow_control(uport, true);
1119
1120 /*
1121 * Disable Rx channel of UARTDM
1122 * DMA Rx Stall happens if enqueue and flush of Rx command happens
1123 * concurrently. Hence before changing the baud rate/protocol
1124 * configuration and sending flush command to ADM, disable the Rx
1125 * channel of UARTDM.
1126 * Note: should not reset the receiver here immediately as it is not
1127 * suggested to do disable/reset or reset/disable at the same time.
1128 */
1129 data = msm_hs_read(uport, UART_DM_DMEN);
1130 /* Disable UARTDM RX BAM Interface */
1131 data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
1132 msm_hs_write(uport, UART_DM_DMEN, data);
1133
1134 /*
1135 * Reset RX and TX.
1136 * Resetting the RX enables it, therefore we must reset and disable.
1137 */
1138 msm_hs_write(uport, UART_DM_CR, RESET_RX);
1139 msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
1140 msm_hs_write(uport, UART_DM_CR, RESET_TX);
1141
1142 /* 300 is the minimum baud support by the driver */
1143 bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);
1144
1145 /* Temporary remapping 200 BAUD to 3.2 mbps */
1146 if (bps == 200)
1147 bps = 3200000;
1148
1149 uport->uartclk = clk_get_rate(msm_uport->clk);
1150 if (!uport->uartclk)
1151 msm_hs_set_std_bps_locked(uport, bps);
1152 else
1153 msm_hs_set_bps_locked(uport, bps);
1154
1155 data = msm_hs_read(uport, UART_DM_MR2);
1156 data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
1157 /* set parity */
1158 if (c_cflag & PARENB) {
1159 if (c_cflag & PARODD)
1160 data |= ODD_PARITY;
1161 else if (c_cflag & CMSPAR)
1162 data |= SPACE_PARITY;
1163 else
1164 data |= EVEN_PARITY;
1165 }
1166
1167 /* Set bits per char */
1168 data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;
1169
1170 switch (c_cflag & CSIZE) {
1171 case CS5:
1172 data |= FIVE_BPC;
1173 break;
1174 case CS6:
1175 data |= SIX_BPC;
1176 break;
1177 case CS7:
1178 data |= SEVEN_BPC;
1179 break;
1180 default:
1181 data |= EIGHT_BPC;
1182 break;
1183 }
1184 /* stop bits */
1185 if (c_cflag & CSTOPB) {
1186 data |= STOP_BIT_TWO;
1187 } else {
1188 /* otherwise 1 stop bit */
1189 data |= STOP_BIT_ONE;
1190 }
1191 data |= UARTDM_MR2_ERROR_MODE_BMSK;
1192 /* write parity/bits per char/stop bit configuration */
1193 msm_hs_write(uport, UART_DM_MR2, data);
1194
1195 uport->ignore_status_mask = termios->c_iflag & INPCK;
1196 uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
1197 uport->ignore_status_mask |= termios->c_iflag & IGNBRK;
1198
1199 uport->read_status_mask = (termios->c_cflag & CREAD);
1200
1201 /* Set Transmit software time out */
1202 uart_update_timeout(uport, c_cflag, bps);
1203
1204 /* Enable UARTDM Rx BAM Interface */
1205 data = msm_hs_read(uport, UART_DM_DMEN);
1206 data |= UARTDM_RX_BAM_ENABLE_BMSK;
1207 msm_hs_write(uport, UART_DM_DMEN, data);
1208 msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
1209 /* Issue TX,RX BAM Start IFC command */
1210 msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
1211 msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
1212 /* Ensure Register Writes Complete */
1213 mb();
1214
1215 /* Configure HW flow control
1216 * UART Core would see status of CTS line when it is sending data
1217 * to remote uart to confirm that it can receive or not.
1218 * UART Core would trigger RFR if it is not having any space with
1219 * RX FIFO.
1220 */
1221 /* Pulling RFR line high */
1222 msm_hs_write(uport, UART_DM_CR, RFR_LOW);
1223 data = msm_hs_read(uport, UART_DM_MR1);
1224 data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
1225 if (c_cflag & CRTSCTS) {
1226 data |= UARTDM_MR1_CTS_CTL_BMSK;
1227 data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
1228 msm_uport->flow_control = true;
1229 }
1230 msm_hs_write(uport, UART_DM_MR1, data);
1231 MSM_HS_INFO("%s: Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);
1232
1233 mutex_unlock(&msm_uport->mtx);
1234
1235 msm_hs_resource_unvote(msm_uport);
1236}
1237
1238/*
1239 * Standard API, Transmitter
1240 * Any character in the transmit shift register is sent
1241 */
1242unsigned int msm_hs_tx_empty(struct uart_port *uport)
1243{
1244 unsigned int data;
1245 unsigned int isr;
1246 unsigned int ret = 0;
1247 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1248
1249 msm_hs_resource_vote(msm_uport);
1250 data = msm_hs_read(uport, UART_DM_SR);
1251 isr = msm_hs_read(uport, UART_DM_ISR);
1252 msm_hs_resource_unvote(msm_uport);
1253 MSM_HS_INFO("%s(): SR:0x%x ISR:0x%x ", __func__, data, isr);
1254
1255 if (data & UARTDM_SR_TXEMT_BMSK) {
1256 ret = TIOCSER_TEMT;
1257 } else
1258 /*
1259 * Add an extra sleep here because sometimes the framework's
1260 * delay (based on baud rate) isn't good enough.
1261 * Note that this won't happen during every port close, only
1262 * on select occassions when the userspace does back to back
1263 * write() and close().
1264 */
1265 usleep_range(5000, 7000);
1266
1267 return ret;
1268}
1269EXPORT_SYMBOL(msm_hs_tx_empty);
1270
1271/*
1272 * Standard API, Stop transmitter.
1273 * Any character in the transmit shift register is sent as
1274 * well as the current data mover transfer .
1275 */
1276static void msm_hs_stop_tx_locked(struct uart_port *uport)
1277{
1278 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1279 struct msm_hs_tx *tx = &msm_uport->tx;
1280
1281 tx->flush = FLUSH_STOP;
1282}
1283
/*
 * Tear down the SPS/BAM RX producer pipe and reset all RX descriptor
 * bookkeeping, then wake waiters blocked on the disconnect.
 * Returns the sps_rx_disconnect() result (0 on success).
 */
static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
{
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
	int ret = 0;

	ret = sps_rx_disconnect(sps_pipe_handle);

	/* Any descriptors still marked pending are abandoned here */
	if (msm_uport->rx.pending_flag)
		MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
		__func__, msm_uport->rx.pending_flag);
	MSM_HS_DBG("%s(): clearing desc usage flag", __func__);
	msm_uport->rx.queued_flag = 0;
	msm_uport->rx.pending_flag = 0;
	msm_uport->rx.rx_inx = 0;

	if (ret)
		MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
	/* Mark RX shut down even on error, then unblock waiters */
	msm_uport->rx.flush = FLUSH_SHUTDOWN;
	MSM_HS_DBG("%s: Calling Completion\n", __func__);
	wake_up(&msm_uport->bam_disconnect_wait);
	MSM_HS_DBG("%s: Done Completion\n", __func__);
	wake_up(&msm_uport->rx.wait);
	return ret;
}
1309
1310static int sps_tx_disconnect(struct msm_hs_port *msm_uport)
1311{
1312 struct uart_port *uport = &msm_uport->uport;
1313 struct msm_hs_tx *tx = &msm_uport->tx;
1314 struct sps_pipe *tx_pipe = tx->cons.pipe_handle;
1315 unsigned long flags;
1316 int ret = 0;
1317
1318 if (msm_uport->tx.flush == FLUSH_SHUTDOWN) {
1319 MSM_HS_DBG("%s(): pipe already disonnected", __func__);
1320 return ret;
1321 }
1322
1323 ret = sps_disconnect(tx_pipe);
1324
1325 if (ret) {
1326 MSM_HS_ERR("%s(): sps_disconnect failed %d", __func__, ret);
1327 return ret;
1328 }
1329
1330 spin_lock_irqsave(&uport->lock, flags);
1331 msm_uport->tx.flush = FLUSH_SHUTDOWN;
1332 spin_unlock_irqrestore(&uport->lock, flags);
1333
1334 MSM_HS_DBG("%s(): TX Disconnect", __func__);
1335 return ret;
1336}
1337
1338static void msm_hs_disable_rx(struct uart_port *uport)
1339{
1340 unsigned int data;
1341
1342 data = msm_hs_read(uport, UART_DM_DMEN);
1343 data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
1344 msm_hs_write(uport, UART_DM_DMEN, data);
1345}
1346
1347/*
1348 * Standard API, Stop receiver as soon as possible.
1349 *
1350 * Function immediately terminates the operation of the
1351 * channel receiver and any incoming characters are lost. None
1352 * of the receiver status bits are affected by this command and
1353 * characters that are already in the receive FIFO there.
1354 */
1355static void msm_hs_stop_rx_locked(struct uart_port *uport)
1356{
1357 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1358
1359 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
1360 MSM_HS_WARN("%s(): Clocks are off\n", __func__);
1361 else
1362 msm_hs_disable_rx(uport);
1363
1364 if (msm_uport->rx.flush == FLUSH_NONE)
1365 msm_uport->rx.flush = FLUSH_STOP;
1366}
1367
/*
 * Fully shut down the RX path: disable the RX BAM interface, advance
 * the flush state past FLUSH_NONE, and disconnect the BAM RX pipe
 * (which resets descriptor bookkeeping and wakes waiters).
 */
static void msm_hs_disconnect_rx(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	msm_hs_disable_rx(uport);
	/* Disconnect the BAM RX pipe */
	if (msm_uport->rx.flush == FLUSH_NONE)
		msm_uport->rx.flush = FLUSH_STOP;
	disconnect_rx_endpoint(msm_uport);
	MSM_HS_DBG("%s(): rx->flush %d", __func__, msm_uport->rx.flush);
}
1379
/*
 * Tx timeout callback function (timer callback, see tx_timeout_timer).
 * Fires when a queued TX DMA transfer did not complete within the
 * timeout set in msm_hs_submit_tx_locked(). Diagnostic only: warns if
 * CTS appears to be blocking TX and dumps the UART registers.
 * @arg: the msm_hs_port, passed as the timer's unsigned long argument.
 */
void tx_timeout_handler(unsigned long arg)
{
	struct msm_hs_port *msm_uport = (struct msm_hs_port *) arg;
	struct uart_port *uport = &msm_uport->uport;
	int isr;

	/* Registers cannot be touched with clocks off */
	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_WARN("%s(): clocks are off", __func__);
		return;
	}

	isr = msm_hs_read(uport, UART_DM_ISR);
	if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
		MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
	dump_uart_hs_registers(msm_uport);
}
1397
/*
 * Transmit the next chunk of data.
 * Called with the port lock held. Takes up to UARTDM_TX_BUF_SIZE bytes
 * from the circular xmit buffer (bounded by the distance to the buffer
 * end, so one descriptor never wraps), syncs them for DMA and queues a
 * single SPS transfer. Completion arrives via msm_hs_sps_tx_callback.
 */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	int aligned_tx_count;
	dma_addr_t src_addr;
	dma_addr_t aligned_src_addr;
	u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
	struct sps_pipe *sps_pipe_handle;
	int ret;

	/* Nothing to send (or tty stopped): release the in-flight claim */
	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		tx->dma_in_flight = false;
		msm_hs_stop_tx_locked(uport);
		return;
	}

	tx_count = uart_circ_chars_pending(tx_buf);

	if (tx_count > UARTDM_TX_BUF_SIZE)
		tx_count = UARTDM_TX_BUF_SIZE;

	/* Clamp to the contiguous run up to the end of the circ buffer */
	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	/* Mask the src_addr to align on a cache
	 * and add those bytes to tx_count
	 */
	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
	aligned_tx_count = tx_count + src_addr - aligned_src_addr;

	dma_sync_single_for_device(uport->dev, aligned_src_addr,
			aligned_tx_count, DMA_TO_DEVICE);

	/* Remembered so the completion path can advance tail by this much */
	tx->tx_count = tx_count;

	hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
			&tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
	sps_pipe_handle = tx->cons.pipe_handle;

	/* Set 1 second timeout */
	mod_timer(&tx->tx_timeout_timer,
			jiffies + msecs_to_jiffies(MSEC_PER_SEC));
	/* Queue transfer request to SPS */
	ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
				msm_uport, flags);

	MSM_HS_DBG("%s:Enqueue Tx Cmd, ret %d\n", __func__, ret);
}
1454
/*
 * This function queues the rx descriptor for BAM transfer.
 * @inx: descriptor index; its buffer lives at inx * UARTDM_RX_BUF_SIZE
 *       from the start of the RX DMA region.
 */
static void msm_hs_post_rx_desc(struct msm_hs_port *msm_uport, int inx)
{
	u32 flags = SPS_IOVEC_FLAG_INT;
	struct msm_hs_rx *rx = &msm_uport->rx;
	int ret;

	/* Physical and virtual addresses of this descriptor's buffer */
	phys_addr_t rbuff_addr = rx->rbuffer + (UARTDM_RX_BUF_SIZE * inx);
	u8 *virt_addr = rx->buffer + (UARTDM_RX_BUF_SIZE * inx);

	MSM_HS_DBG("%s: %d:Queue desc %d, 0x%llx, base 0x%llx virtaddr %p",
		__func__, msm_uport->uport.line, inx,
		(u64)rbuff_addr, (u64)rx->rbuffer, virt_addr);

	/* Size is filled in by the BAM on completion; clear stale value */
	rx->iovec[inx].size = 0;
	ret = sps_transfer_one(rx->prod.pipe_handle, rbuff_addr,
		UARTDM_RX_BUF_SIZE, msm_uport, flags);

	if (ret)
		MSM_HS_ERR("Error processing descriptor %d", ret);
}
1476
1477/* Update the rx descriptor index to specify the next one to be processed */
1478static void msm_hs_mark_next(struct msm_hs_port *msm_uport, int inx)
1479{
1480 struct msm_hs_rx *rx = &msm_uport->rx;
1481 int prev;
1482
1483 inx %= UART_DMA_DESC_NR;
1484 MSM_HS_DBG("%s(): inx %d, pending 0x%lx", __func__, inx,
1485 rx->pending_flag);
1486
1487 if (!inx)
1488 prev = UART_DMA_DESC_NR - 1;
1489 else
1490 prev = inx - 1;
1491
1492 if (!test_bit(prev, &rx->pending_flag))
1493 msm_uport->rx.rx_inx = inx;
1494 MSM_HS_DBG("%s(): prev %d pending flag 0x%lx, next %d", __func__,
1495 prev, rx->pending_flag, msm_uport->rx.rx_inx);
1496}
1497
1498/*
1499 * Queue the rx descriptor that has just been processed or
1500 * all of them if queueing for the first time
1501 */
1502static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport)
1503{
1504 struct msm_hs_rx *rx = &msm_uport->rx;
1505 int i, flag = 0;
1506
1507 /* At first, queue all, if not, queue only one */
1508 if (rx->queued_flag || rx->pending_flag) {
1509 if (!test_bit(rx->rx_inx, &rx->queued_flag) &&
1510 !test_bit(rx->rx_inx, &rx->pending_flag)) {
1511 msm_hs_post_rx_desc(msm_uport, rx->rx_inx);
1512 set_bit(rx->rx_inx, &rx->queued_flag);
1513 MSM_HS_DBG("%s(): Set Queued Bit %d",
1514 __func__, rx->rx_inx);
1515 } else
1516 MSM_HS_ERR("%s(): rx_inx pending or queued", __func__);
1517 return;
1518 }
1519
1520 for (i = 0; i < UART_DMA_DESC_NR; i++) {
1521 if (!test_bit(i, &rx->queued_flag) &&
1522 !test_bit(i, &rx->pending_flag)) {
1523 MSM_HS_DBG("%s(): Calling post rx %d", __func__, i);
1524 msm_hs_post_rx_desc(msm_uport, i);
1525 set_bit(i, &rx->queued_flag);
1526 flag = 1;
1527 }
1528 }
1529
1530 if (!flag)
1531 MSM_HS_ERR("%s(): error queueing descriptor", __func__);
1532}
1533
/*
 * Start to receive the next chunk of data.
 * Called with the port lock held and clocks expected on. Re-arms the
 * stale-event machinery, re-enables the RX BAM interface (disabled by
 * set_termios), programs cyclic auto-reactivation, and queues RX
 * descriptors to the BAM.
 */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
	unsigned int data;

	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
		return;
	}
	/* Data already waiting to be consumed: let the rx worker run first */
	if (rx->pending_flag) {
		MSM_HS_INFO("%s: Rx Cmd got executed, wait for rx_tlet\n",
									__func__);
		rx->flush = FLUSH_IGNORE;
		return;
	}
	if (buffer_pending)
		MSM_HS_ERR("Error: rx started in buffer state =%x",
			buffer_pending);

	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
	msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
	/*
	 * Enable UARTDM Rx Interface as previously it has been
	 * disable in set_termios before configuring baud rate.
	 */
	data = msm_hs_read(uport, UART_DM_DMEN);
	/* Enable UARTDM Rx BAM Interface */
	data |= UARTDM_RX_BAM_ENABLE_BMSK;

	msm_hs_write(uport, UART_DM_DMEN, data);
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	/*
	 * RX-transfer will be automatically re-activated
	 * after last data of previous transfer was read.
	 */
	data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
		RX_DMRX_CYCLIC_EN);
	msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);
	/* Issue RX BAM Start IFC command */
	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
	/* Ensure register IO completion */
	mb();

	/* RX is live again; hand descriptors to the BAM and wake waiters */
	msm_uport->rx.flush = FLUSH_NONE;
	msm_uport->rx_bam_inprogress = true;
	msm_hs_queue_rx_desc(msm_uport);
	msm_uport->rx_bam_inprogress = false;
	wake_up(&msm_uport->rx.wait);
	MSM_HS_DBG("%s:Enqueue Rx Cmd\n", __func__);
}
1591
/*
 * Delayed-work retry path: when the tty flip buffer was full, RX data
 * and error events are parked in rx.buffer_pending; this work item
 * retries pushing them to the tty, rescheduling itself until drained,
 * then restarts RX.
 */
static void flip_insert_work(struct work_struct *work)
{
	unsigned long flags;
	int retval;
	struct msm_hs_port *msm_uport =
		container_of(work, struct msm_hs_port,
			     rx.flip_insert_work.work);
	struct tty_struct *tty = msm_uport->uport.state->port.tty;

	spin_lock_irqsave(&msm_uport->uport.lock, flags);
	if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
		dev_err(msm_uport->uport.dev,
			"%s:Invalid driver state flush %d\n",
			__func__, msm_uport->rx.flush);
		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
			__func__, msm_uport->rx.flush);
		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
		return;
	}

	if (msm_uport->rx.buffer_pending == NONE_PENDING) {
		MSM_HS_ERR("Error: No buffer pending in %s", __func__);
		spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
		return;
	}
	/* Retry parked error events first; clear each flag on success */
	if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
		retval = tty_insert_flip_char(tty->port, 0, TTY_OVERRUN);
		if (retval)
			msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
	}
	if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
		retval = tty_insert_flip_char(tty->port, 0, TTY_PARITY);
		if (retval)
			msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
	}
	if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
		int rx_count, rx_offset;

		/*
		 * Unpack count/offset packed into buffer_pending.
		 * NOTE(review): this decode (count from bits 31:16, offset
		 * via mask 0xFFD0 >> 5) does not match the "retval << 8"
		 * encoding below, nor the "retval << 5" encoding used in
		 * msm_serial_hs_rx_work — confirm the intended bit layout.
		 */
		rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
		rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
		retval = tty_insert_flip_string(tty->port,
			msm_uport->rx.buffer +
			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)
			+ rx_offset, rx_count);
		msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
						 PARITY_ERROR);
		if (retval != rx_count)
			msm_uport->rx.buffer_pending |= CHARS_NORMAL |
				retval << 8 | (rx_count - retval) << 16;
	}
	if (msm_uport->rx.buffer_pending) {
		/* Still backed up: try again later */
		schedule_delayed_work(&msm_uport->rx.flip_insert_work,
				      msecs_to_jiffies(RETRY_TIMEOUT));
	} else if (msm_uport->rx.flush <= FLUSH_IGNORE) {
		MSM_HS_WARN("Pending buffers cleared, restarting");
		clear_bit(msm_uport->rx.rx_inx,
			&msm_uport->rx.pending_flag);
		msm_hs_start_rx_locked(&msm_uport->uport);
		msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
	}
	spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
	tty_flip_buffer_push(tty->port);
}
1655
/*
 * RX kthread worker: drains completed BAM RX descriptors into the tty.
 * For each pending descriptor it reports line errors (overrun, parity/
 * frame, break), copies the data to the flip buffer, requeues the
 * descriptor and advances rx_inx. If the tty buffer fills, leftovers
 * are parked in rx.buffer_pending and flip_insert_work takes over.
 */
static void msm_serial_hs_rx_work(struct kthread_work *work)
{
	int retval;
	int rx_count = 0;
	unsigned long status;
	unsigned long flags;
	unsigned int error_f = 0;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;
	unsigned int flush = FLUSH_DATA_INVALID;
	struct tty_struct *tty;
	struct sps_event_notify *notify;
	struct msm_hs_rx *rx;
	struct sps_pipe *sps_pipe_handle;
	struct platform_device *pdev;
	const struct msm_serial_hs_platform_data *pdata;

	msm_uport = container_of((struct kthread_work *) work,
				 struct msm_hs_port, rx.kwork);
	msm_hs_resource_vote(msm_uport);
	uport = &msm_uport->uport;
	tty = uport->state->port.tty;
	notify = &msm_uport->notify;
	rx = &msm_uport->rx;
	pdev = to_platform_device(uport->dev);
	pdata = pdev->dev.platform_data;

	spin_lock_irqsave(&uport->lock, flags);

	if (!tty || rx->flush == FLUSH_SHUTDOWN) {
		dev_err(uport->dev, "%s:Invalid driver state flush %d\n",
			__func__, rx->flush);
		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
			__func__, rx->flush);
		spin_unlock_irqrestore(&uport->lock, flags);
		msm_hs_resource_unvote(msm_uport);
		return;
	}

	/*
	 * Process all pending descs or if nothing is
	 * queued - called from termios
	 */
	while (!rx->buffer_pending &&
		(rx->pending_flag || !rx->queued_flag)) {
		MSM_HS_DBG("%s(): Loop P 0x%lx Q 0x%lx", __func__,
			rx->pending_flag, rx->queued_flag);

		status = msm_hs_read(uport, UART_DM_SR);

		MSM_HS_DBG("In %s\n", __func__);

		/* overflow is not connect to data in a FIFO */
		if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
			     (uport->read_status_mask & CREAD))) {
			retval = tty_insert_flip_char(tty->port,
							0, TTY_OVERRUN);
			MSM_HS_WARN("%s(): RX Buffer Overrun Detected\n",
				__func__);
			/*
			 * NOTE(review): parks TTY_OVERRUN in buffer_pending,
			 * but flip_insert_work tests FIFO_OVERRUN — confirm
			 * these flag values are meant to coincide.
			 */
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_OVERRUN;
			uport->icount.buf_overrun++;
			error_f = 1;
		}

		/*
		 * NOTE(review): ignore_status_mask holds c_iflag bits (set
		 * in set_termios); this clears parity/frame status when
		 * INPCK is absent — verify the intended polarity.
		 */
		if (!(uport->ignore_status_mask & INPCK))
			status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);

		if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
			/* Can not tell diff between parity & frame error */
			MSM_HS_WARN("msm_serial_hs: parity error\n");
			uport->icount.parity++;
			error_f = 1;
			if (!(uport->ignore_status_mask & IGNPAR)) {
				retval = tty_insert_flip_char(tty->port,
							0, TTY_PARITY);
				if (!retval)
					msm_uport->rx.buffer_pending
							|= TTY_PARITY;
			}
		}

		if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
			MSM_HS_DBG("msm_serial_hs: Rx break\n");
			uport->icount.brk++;
			error_f = 1;
			if (!(uport->ignore_status_mask & IGNBRK)) {
				retval = tty_insert_flip_char(tty->port,
								0, TTY_BREAK);
				if (!retval)
					msm_uport->rx.buffer_pending
								|= TTY_BREAK;
			}
		}

		if (error_f)
			msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
		flush = msm_uport->rx.flush;
		/* FLUSH_IGNORE: a restart was deferred; do it now */
		if (flush == FLUSH_IGNORE)
			if (!msm_uport->rx.buffer_pending) {
				MSM_HS_DBG("%s: calling start_rx_locked\n",
					__func__);
				msm_hs_start_rx_locked(uport);
			}
		if (flush >= FLUSH_DATA_INVALID)
			goto out;

		/* Byte count for the current descriptor, filled by the BAM */
		rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
		hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
			(msm_uport->rx.buffer +
			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
			msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
			rx_count);

		/*
		 * We are in a spin locked context, spin lock taken at
		 * other places where these flags are updated
		 */
		if (0 != (uport->read_status_mask & CREAD)) {
			/* Only pending && !queued descriptors carry data */
			if (!test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag) &&
			    !test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.queued_flag))
				MSM_HS_ERR("%s: RX INX not set", __func__);
			else if (test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag) &&
				!test_bit(msm_uport->rx.rx_inx,
					&msm_uport->rx.queued_flag)) {
				MSM_HS_DBG("%s(): Clear Pending Bit %d",
					__func__, msm_uport->rx.rx_inx);

				retval = tty_insert_flip_string(tty->port,
					msm_uport->rx.buffer +
					(msm_uport->rx.rx_inx *
					UARTDM_RX_BUF_SIZE),
					rx_count);

				/* Park the undelivered remainder, if any */
				if (retval != rx_count) {
					MSM_HS_INFO("%s(): ret %d rx_count %d",
						__func__, retval, rx_count);
					msm_uport->rx.buffer_pending |=
					CHARS_NORMAL | retval << 5 |
					(rx_count - retval) << 16;
				}
			} else
				MSM_HS_ERR("%s: Error in inx %d", __func__,
					msm_uport->rx.rx_inx);
		}

		if (!msm_uport->rx.buffer_pending) {
			/* Requeue this descriptor and move to the next one */
			msm_uport->rx.flush = FLUSH_NONE;
			msm_uport->rx_bam_inprogress = true;
			sps_pipe_handle = rx->prod.pipe_handle;
			MSM_HS_DBG("Queing bam descriptor\n");
			/* Queue transfer request to SPS */
			clear_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag);
			msm_hs_queue_rx_desc(msm_uport);
			msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
			msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
			msm_uport->rx_bam_inprogress = false;
			wake_up(&msm_uport->rx.wait);
		} else
			break;

	}
out:
	if (msm_uport->rx.buffer_pending) {
		MSM_HS_WARN("%s: tty buffer exhausted. Stalling\n", __func__);
		schedule_delayed_work(&msm_uport->rx.flip_insert_work
				      , msecs_to_jiffies(RETRY_TIMEOUT));
	}
	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
	spin_unlock_irqrestore(&uport->lock, flags);
	if (flush < FLUSH_DATA_INVALID)
		tty_flip_buffer_push(tty->port);
	msm_hs_resource_unvote(msm_uport);
}
1834
1835static void msm_hs_start_tx_locked(struct uart_port *uport)
1836{
1837 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1838 struct msm_hs_tx *tx = &msm_uport->tx;
1839
1840 /* Bail if transfer in progress */
1841 if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
1842 MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
1843 __func__, tx->flush, tx->dma_in_flight);
1844 return;
1845 }
1846
1847 if (!tx->dma_in_flight) {
1848 tx->dma_in_flight = true;
1849 kthread_queue_work(&msm_uport->tx.kworker,
1850 &msm_uport->tx.kwork);
1851 }
1852}
1853
1854/**
1855 * Callback notification from SPS driver
1856 *
1857 * This callback function gets triggered called from
1858 * SPS driver when requested SPS data transfer is
1859 * completed.
1860 *
1861 */
1862
1863static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
1864{
1865 struct msm_hs_port *msm_uport =
1866 (struct msm_hs_port *)
1867 ((struct sps_event_notify *)notify)->user;
1868 phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
1869 notify->data.transfer.iovec.addr);
1870
1871 msm_uport->notify = *notify;
1872 MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
1873 &addr, notify->data.transfer.iovec.size,
1874 notify->data.transfer.iovec.flags);
1875
1876 del_timer(&msm_uport->tx.tx_timeout_timer);
1877 MSM_HS_DBG("%s(): Queue kthread work", __func__);
1878 kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
1879}
1880
1881static void msm_serial_hs_tx_work(struct kthread_work *work)
1882{
1883 unsigned long flags;
1884 struct msm_hs_port *msm_uport =
1885 container_of((struct kthread_work *)work,
1886 struct msm_hs_port, tx.kwork);
1887 struct uart_port *uport = &msm_uport->uport;
1888 struct circ_buf *tx_buf = &uport->state->xmit;
1889 struct msm_hs_tx *tx = &msm_uport->tx;
1890
1891 /*
1892 * Do the work buffer related work in BAM
1893 * mode that is equivalent to legacy mode
1894 */
1895 msm_hs_resource_vote(msm_uport);
1896 if (tx->flush >= FLUSH_STOP) {
1897 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
1898 tx->flush = FLUSH_NONE;
1899 MSM_HS_DBG("%s(): calling submit_tx", __func__);
1900 msm_hs_submit_tx_locked(uport);
1901 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
1902 msm_hs_resource_unvote(msm_uport);
1903 return;
1904 }
1905
1906 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
1907 if (!uart_circ_empty(tx_buf))
1908 tx_buf->tail = (tx_buf->tail +
1909 tx->tx_count) & ~UART_XMIT_SIZE;
1910 else
1911 MSM_HS_DBG("%s:circ buffer is empty\n", __func__);
1912
1913 wake_up(&msm_uport->tx.wait);
1914
1915 uport->icount.tx += tx->tx_count;
1916
1917 /*
1918 * Calling to send next chunk of data
1919 * If the circ buffer is empty, we stop
1920 * If the clock off was requested, the clock
1921 * off sequence is kicked off
1922 */
1923 MSM_HS_DBG("%s(): calling submit_tx", __func__);
1924 msm_hs_submit_tx_locked(uport);
1925
1926 if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
1927 uart_write_wakeup(uport);
1928
1929 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
1930 msm_hs_resource_unvote(msm_uport);
1931}
1932
1933static void
1934msm_hs_mark_proc_rx_desc(struct msm_hs_port *msm_uport,
1935 struct sps_event_notify *notify)
1936{
1937 struct msm_hs_rx *rx = &msm_uport->rx;
1938 phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
1939 notify->data.transfer.iovec.addr);
1940 /* divide by UARTDM_RX_BUF_SIZE */
1941 int inx = (addr - rx->rbuffer) >> 9;
1942
1943 set_bit(inx, &rx->pending_flag);
1944 clear_bit(inx, &rx->queued_flag);
1945 rx->iovec[inx] = notify->data.transfer.iovec;
1946 MSM_HS_DBG("Clear Q, Set P Bit %d, Q 0x%lx P 0x%lx",
1947 inx, rx->queued_flag, rx->pending_flag);
1948}
1949
1950/**
1951 * Callback notification from SPS driver
1952 *
1953 * This callback function gets triggered called from
1954 * SPS driver when requested SPS data transfer is
1955 * completed.
1956 *
1957 */
1958
1959static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
1960{
1961
1962 struct msm_hs_port *msm_uport =
1963 (struct msm_hs_port *)
1964 ((struct sps_event_notify *)notify)->user;
1965 struct uart_port *uport;
1966 unsigned long flags;
1967 struct msm_hs_rx *rx = &msm_uport->rx;
1968 phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
1969 notify->data.transfer.iovec.addr);
1970 /* divide by UARTDM_RX_BUF_SIZE */
1971 int inx = (addr - rx->rbuffer) >> 9;
1972
1973 uport = &(msm_uport->uport);
1974 msm_uport->notify = *notify;
1975 MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
1976 &addr, notify->data.transfer.iovec.size,
1977 notify->data.transfer.iovec.flags);
1978
1979 spin_lock_irqsave(&uport->lock, flags);
1980 msm_hs_mark_proc_rx_desc(msm_uport, notify);
1981 spin_unlock_irqrestore(&uport->lock, flags);
1982
1983 if (msm_uport->rx.flush == FLUSH_NONE) {
1984 /* Test if others are queued */
1985 if (msm_uport->rx.pending_flag & ~(1 << inx)) {
1986 MSM_HS_DBG("%s(): inx 0x%x, 0x%lx not processed",
1987 __func__, inx,
1988 msm_uport->rx.pending_flag & ~(1<<inx));
1989 }
1990 kthread_queue_work(&msm_uport->rx.kworker,
1991 &msm_uport->rx.kwork);
1992 MSM_HS_DBG("%s(): Scheduled rx_tlet", __func__);
1993 }
1994}
1995
1996/*
1997 * Standard API, Current states of modem control inputs
1998 *
1999 * Since CTS can be handled entirely by HARDWARE we always
2000 * indicate clear to send and count on the TX FIFO to block when
2001 * it fills up.
2002 *
2003 * - TIOCM_DCD
2004 * - TIOCM_CTS
2005 * - TIOCM_DSR
2006 * - TIOCM_RI
2007 * (Unsupported) DCD and DSR will return them high. RI will return low.
2008 */
2009static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
2010{
2011 return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
2012}
2013
2014/*
2015 * Standard API, Set or clear RFR_signal
2016 *
2017 * Set RFR high, (Indicate we are not ready for data), we disable auto
2018 * ready for receiving and then set RFR_N high. To set RFR to low we just turn
2019 * back auto ready for receiving and it should lower RFR signal
2020 * when hardware is ready
2021 */
2022void msm_hs_set_mctrl_locked(struct uart_port *uport,
2023 unsigned int mctrl)
2024{
2025 unsigned int set_rts;
2026 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2027
2028 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
2029 MSM_HS_WARN("%s(): Clocks are off\n", __func__);
2030 return;
2031 }
2032 /* RTS is active low */
2033 set_rts = TIOCM_RTS & mctrl ? 0 : 1;
2034 MSM_HS_INFO("%s: set_rts %d\n", __func__, set_rts);
2035
2036 if (set_rts)
2037 msm_hs_disable_flow_control(uport, false);
2038 else
2039 msm_hs_enable_flow_control(uport, false);
2040}
2041
2042void msm_hs_set_mctrl(struct uart_port *uport,
2043 unsigned int mctrl)
2044{
2045 unsigned long flags;
2046 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2047
2048 msm_hs_resource_vote(msm_uport);
2049 spin_lock_irqsave(&uport->lock, flags);
2050 msm_hs_set_mctrl_locked(uport, mctrl);
2051 spin_unlock_irqrestore(&uport->lock, flags);
2052 msm_hs_resource_unvote(msm_uport);
2053}
2054EXPORT_SYMBOL(msm_hs_set_mctrl);
2055
/*
 * Standard API, Enable modem status (CTS) interrupt.
 * Unmasks DELTA_CTS in the cached IMR and programs the register;
 * requires the port's clocks to be on.
 */
static void msm_hs_enable_ms_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
		return;
	}

	/* Enable DELTA_CTS Interrupt */
	msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/* Ensure register IO completion */
	mb();

}
2073
2074/*
2075 * Standard API, Break Signal
2076 *
2077 * Control the transmission of a break signal. ctl eq 0 => break
2078 * signal terminate ctl ne 0 => start break signal
2079 */
2080static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
2081{
2082 unsigned long flags;
2083 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2084
2085 msm_hs_resource_vote(msm_uport);
2086 spin_lock_irqsave(&uport->lock, flags);
2087 msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
2088 /* Ensure register IO completion */
2089 mb();
2090 spin_unlock_irqrestore(&uport->lock, flags);
2091 msm_hs_resource_unvote(msm_uport);
2092}
2093
2094static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
2095{
2096 if (cfg_flags & UART_CONFIG_TYPE)
2097 uport->type = PORT_MSM;
2098
2099}
2100
/* Handle CTS changes (Called from interrupt handler).
 * Clears the delta-CTS condition in hardware, bumps the CTS event
 * count and wakes any TIOCMIWAIT waiter. Caller holds the port lock
 * (per the _locked naming convention — called from msm_hs_isr()).
 */
static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	msm_hs_resource_vote(msm_uport);
	/* clear interrupt */
	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
	/* Calling CLOCK API. Hence mb() requires here. */
	mb();
	uport->icount.cts++;

	/* clear the IOCTL TIOCMIWAIT if called */
	wake_up_interruptible(&uport->state->port.delta_msr_wait);
	msm_hs_resource_unvote(msm_uport);
}
2117
/*
 * Main UARTDM interrupt handler.
 *
 * Reads the masked interrupt status (MISR) and services, in order:
 * RX level, RX stale, TX-ready (DMA TX completion), TX level (FIFO
 * empty -> schedule clock-off) and delta-CTS. The port lock is held
 * for the whole routine.
 */
static irqreturn_t msm_hs_isr(int irq, void *dev)
{
	unsigned long flags;
	unsigned int isr_status;
	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
	struct uart_port *uport = &msm_uport->uport;
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct msm_hs_tx *tx = &msm_uport->tx;

	spin_lock_irqsave(&uport->lock, flags);

	isr_status = msm_hs_read(uport, UART_DM_MISR);
	MSM_HS_INFO("%s: DM_ISR: 0x%x\n", __func__, isr_status);
	dump_uart_hs_registers(msm_uport);

	/* Uart RX starting: mask further RXLEV interrupts */
	if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
		MSM_HS_DBG("%s:UARTDM_ISR_RXLEV_BMSK\n", __func__);
		msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
		/* Complete device write for IMR. Hence mb() requires. */
		mb();
	}
	/* Stale rx interrupt: line went idle with data pending */
	if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
		msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
		msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
		/*
		 * Complete device write before calling DMOV API. Hence
		 * mb() requires here.
		 */
		mb();
		MSM_HS_DBG("%s:Stal Interrupt\n", __func__);
	}
	/* tx ready interrupt: a DMA TX transfer has completed */
	if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
		MSM_HS_DBG("%s: ISR_TX_READY Interrupt\n", __func__);
		/* Clear TX Ready */
		msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);

		/*
		 * Complete both writes before starting new TX.
		 * Hence mb() requires here.
		 */
		mb();
		/* Complete DMA TX transactions and submit new transactions */

		/* Do not update tx_buf.tail if uart_flush_buffer already
		 * called in serial core
		 */
		if (!uart_circ_empty(tx_buf))
			tx_buf->tail = (tx_buf->tail +
					tx->tx_count) & ~UART_XMIT_SIZE;
		/*
		 * NOTE(review): the canonical circular-buffer mask is
		 * (UART_XMIT_SIZE - 1); "& ~UART_XMIT_SIZE" only behaves
		 * the same while tail + tx_count < 2 * UART_XMIT_SIZE —
		 * confirm tx_count can never exceed the buffer size.
		 */

		tx->dma_in_flight = false;

		uport->icount.tx += tx->tx_count;

		/* Tell the serial core there is room to write again */
		if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
			uart_write_wakeup(uport);
	}
	if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
		/* TX FIFO is empty: mask TXLEV and schedule clock-off */
		msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
		msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
		MSM_HS_DBG("%s: TXLEV Interrupt\n", __func__);
		/*
		 * Complete device write before starting clock_off request.
		 * Hence mb() requires here.
		 */
		mb();
		queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
	}

	/* Change in CTS interrupt */
	if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
		msm_hs_handle_delta_cts_locked(uport);

	spin_unlock_irqrestore(&uport->lock, flags);

	return IRQ_HANDLED;
}
2200
2201/* The following two functions provide interfaces to get the underlying
2202 * port structure (struct uart_port or struct msm_hs_port) given
2203 * the port index. msm_hs_get_uart port is called by clients.
2204 * The function msm_hs_get_hs_port is for internal use
2205 */
2206
2207struct uart_port *msm_hs_get_uart_port(int port_index)
2208{
2209 struct uart_state *state = msm_hs_driver.state + port_index;
2210
2211 /* The uart_driver structure stores the states in an array.
2212 * Thus the corresponding offset from the drv->state returns
2213 * the state for the uart_port that is requested
2214 */
2215 if (port_index == state->uart_port->line)
2216 return state->uart_port;
2217
2218 return NULL;
2219}
2220EXPORT_SYMBOL(msm_hs_get_uart_port);
2221
2222static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
2223{
2224 struct uart_port *uport = msm_hs_get_uart_port(port_index);
2225
2226 if (uport)
2227 return UARTDM_TO_MSM(uport);
2228 return NULL;
2229}
2230
/*
 * Switch to wakeup-interrupt mode: mask the UART core irq and unmask
 * the RX-wakeup GPIO irq instead, so RX activity can wake the port
 * while its clocks are off. No-op unless low-power RX wakeup is
 * configured and the wakeup irq has not been freed.
 */
void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
{
	unsigned long flags;
	struct uart_port *uport = &(msm_uport->uport);

	if (!is_use_low_power_wakeup(msm_uport))
		return;
	if (msm_uport->wakeup.freed)
		return;

	if (!(msm_uport->wakeup.enabled)) {
		spin_lock_irqsave(&uport->lock, flags);
		/* Make msm_hs_wakeup_isr() skip its first event */
		msm_uport->wakeup.ignore = 1;
		msm_uport->wakeup.enabled = true;
		spin_unlock_irqrestore(&uport->lock, flags);
		/* Swap irq sources: core irq off, wakeup irq on */
		disable_irq(uport->irq);
		enable_irq(msm_uport->wakeup.irq);
	} else {
		MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
	}
}
2252
/*
 * Counterpart of enable_wakeup_interrupt(): mask the RX-wakeup GPIO
 * irq and hand interrupt duty back to the UART core irq. No-op
 * unless low-power RX wakeup is configured and the wakeup irq has
 * not been freed.
 */
void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
{
	unsigned long flags;
	struct uart_port *uport = &(msm_uport->uport);

	if (!is_use_low_power_wakeup(msm_uport))
		return;
	if (msm_uport->wakeup.freed)
		return;

	if (msm_uport->wakeup.enabled) {
		/* NOTE(review): _nosync suggests this may run from the
		 * wakeup irq's own context — confirm against callers.
		 */
		disable_irq_nosync(msm_uport->wakeup.irq);
		enable_irq(uport->irq);
		spin_lock_irqsave(&uport->lock, flags);
		msm_uport->wakeup.enabled = false;
		spin_unlock_irqrestore(&uport->lock, flags);
	} else {
		MSM_HS_WARN("%s:Wake up IRQ already disabled", __func__);
	}
}
2273
/*
 * Quiesce the UART before its resources are released: disable flow
 * control, tear down the RX path if it is still active, wait for an
 * in-flight TX flush to stop, disable the TX BAM dlink, and finally
 * re-enable flow control unless the client explicitly requested the
 * clocks off (client_req_state).
 */
void msm_hs_resource_off(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);
	unsigned int data;

	MSM_HS_DBG("%s(): begin", __func__);
	msm_hs_disable_flow_control(uport, false);
	if (msm_uport->rx.flush == FLUSH_NONE)
		msm_hs_disconnect_rx(uport);

	/* disable dlink */
	/* Wait (bounded, 500 ticks) for a pending TX flush to stop */
	if (msm_uport->tx.flush == FLUSH_NONE)
		wait_event_timeout(msm_uport->tx.wait,
				msm_uport->tx.flush == FLUSH_STOP, 500);

	/* TX pipe still connected: disable BAM TX and disconnect it */
	if (msm_uport->tx.flush != FLUSH_SHUTDOWN) {
		data = msm_hs_read(uport, UART_DM_DMEN);
		data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
		msm_hs_write(uport, UART_DM_DMEN, data);
		sps_tx_disconnect(msm_uport);
	}
	if (!atomic_read(&msm_uport->client_req_state))
		msm_hs_enable_flow_control(uport, false);
}
2298
/*
 * Re-establish BAM/DMA state when the UART's resources come back:
 * reset RX and re-enable the RX BAM dlink if RX was shut down or
 * stopped, reconnect the TX pipe, and restart RX (under the port
 * lock) when it was fully shut down.
 */
void msm_hs_resource_on(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);
	unsigned int data;
	unsigned long flags;

	if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
	    msm_uport->rx.flush == FLUSH_STOP) {
		msm_hs_write(uport, UART_DM_CR, RESET_RX);
		data = msm_hs_read(uport, UART_DM_DMEN);
		data |= UARTDM_RX_BAM_ENABLE_BMSK;
		msm_hs_write(uport, UART_DM_DMEN, data);
	}

	msm_hs_spsconnect_tx(msm_uport);
	if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
		msm_hs_spsconnect_rx(uport);
		spin_lock_irqsave(&uport->lock, flags);
		msm_hs_start_rx_locked(uport);
		spin_unlock_irqrestore(&uport->lock, flags);
	}
}
2321
/* Request to turn off uart clock once pending TX is flushed.
 * Drops one client vote on the UART resources. Fails with -EIO
 * during system suspend and -EPERM when there is no matching
 * clock_on vote outstanding.
 */
int msm_hs_request_clock_off(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	int ret = 0;
	int client_count = 0;

	mutex_lock(&msm_uport->mtx);
	/*
	 * If we're in the middle of a system suspend, don't process these
	 * userspace/kernel API commands.
	 */
	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
		MSM_HS_WARN("%s:Can't process clk request during suspend",
			__func__);
		ret = -EIO;
	}
	mutex_unlock(&msm_uport->mtx);
	if (ret)
		goto exit_request_clock_off;

	/* Reject an unbalanced "off" request (no prior clock_on vote) */
	if (atomic_read(&msm_uport->client_count) <= 0) {
		MSM_HS_WARN("%s(): ioctl count -ve, client check voting",
			__func__);
		ret = -EPERM;
		goto exit_request_clock_off;
	}
	/* Set the flag to disable flow control and wakeup irq */
	if (msm_uport->obs)
		atomic_set(&msm_uport->client_req_state, 1);
	msm_hs_resource_unvote(msm_uport);
	atomic_dec(&msm_uport->client_count);
	client_count = atomic_read(&msm_uport->client_count);
	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
			"%s: Client_Count %d\n", __func__,
			client_count);
exit_request_clock_off:
	return ret;
}
2361EXPORT_SYMBOL(msm_hs_request_clock_off);
2362
2363int msm_hs_request_clock_on(struct uart_port *uport)
2364{
2365 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2366 int client_count;
2367 int ret = 0;
2368
2369 mutex_lock(&msm_uport->mtx);
2370 /*
2371 * If we're in the middle of a system suspend, don't process these
2372 * userspace/kernel API commands.
2373 */
2374 if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
2375 MSM_HS_WARN("%s:Can't process clk request during suspend",
2376 __func__);
2377 ret = -EIO;
2378 }
2379 mutex_unlock(&msm_uport->mtx);
2380 if (ret)
2381 goto exit_request_clock_on;
2382
2383 msm_hs_resource_vote(UARTDM_TO_MSM(uport));
2384 atomic_inc(&msm_uport->client_count);
2385 client_count = atomic_read(&msm_uport->client_count);
2386 LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
2387 "%s: Client_Count %d\n", __func__,
2388 client_count);
2389
2390 /* Clear the flag */
2391 if (msm_uport->obs)
2392 atomic_set(&msm_uport->client_req_state, 0);
2393exit_request_clock_on:
2394 return ret;
2395}
2396EXPORT_SYMBOL(msm_hs_request_clock_on);
2397
/*
 * RX-wakeup GPIO interrupt handler.
 *
 * The first event after arming is swallowed (wakeup.ignore is set by
 * enable_wakeup_interrupt()). Later events may inject a fixed
 * character (rx_to_inject) into the tty, since the byte that caused
 * the wakeup is lost while the UART is clocked off (see the RX
 * wakeup note in the file header).
 */
static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
{
	unsigned int wakeup = 0;
	unsigned long flags;
	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
	struct uart_port *uport = &msm_uport->uport;
	struct tty_struct *tty = NULL;

	spin_lock_irqsave(&uport->lock, flags);

	/* Skip the first (armed) event, act on every one after that */
	if (msm_uport->wakeup.ignore)
		msm_uport->wakeup.ignore = 0;
	else
		wakeup = 1;

	if (wakeup) {
		/*
		 * Port was clocked off during rx, wake up and
		 * optionally inject char into tty rx
		 */
		if (msm_uport->wakeup.inject_rx) {
			tty = uport->state->port.tty;
			tty_insert_flip_char(tty->port,
					     msm_uport->wakeup.rx_to_inject,
					     TTY_NORMAL);
			hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
					"Rx Inject",
					&msm_uport->wakeup.rx_to_inject, 0, 1);
			MSM_HS_INFO("Wakeup ISR.Ignore%d\n",
					msm_uport->wakeup.ignore);
		}
	}

	spin_unlock_irqrestore(&uport->lock, flags);

	/* Push outside the spinlock; tty is only set when injecting */
	if (wakeup && msm_uport->wakeup.inject_rx)
		tty_flip_buffer_push(tty->port);
	return IRQ_HANDLED;
}
2437
/* Standard API: human-readable name of this port type */
static const char *msm_hs_type(struct uart_port *port)
{
	static const char type_name[] = "MSM HS UART";

	return type_name;
}
2442
2443/**
2444 * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
2445 * @uport: uart port
2446 */
2447static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
2448{
2449 struct platform_device *pdev = to_platform_device(uport->dev);
2450 const struct msm_serial_hs_platform_data *pdata =
2451 pdev->dev.platform_data;
2452 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2453 int ret;
2454
2455 if (msm_uport->use_pinctrl) {
2456 ret = pinctrl_select_state(msm_uport->pinctrl,
2457 msm_uport->gpio_state_suspend);
2458 if (ret)
2459 MSM_HS_ERR("%s():Failed to pinctrl set_state",
2460 __func__);
2461 } else if (pdata) {
2462 if (gpio_is_valid(pdata->uart_tx_gpio))
2463 gpio_free(pdata->uart_tx_gpio);
2464 if (gpio_is_valid(pdata->uart_rx_gpio))
2465 gpio_free(pdata->uart_rx_gpio);
2466 if (gpio_is_valid(pdata->uart_cts_gpio))
2467 gpio_free(pdata->uart_cts_gpio);
2468 if (gpio_is_valid(pdata->uart_rfr_gpio))
2469 gpio_free(pdata->uart_rfr_gpio);
2470 } else
2471 MSM_HS_ERR("Error:Pdata is NULL.\n");
2472}
2473
/**
 * msm_hs_config_uart_gpios - Configures UART GPIOs
 * @uport: uart port
 *
 * Prefers pinctrl when available; otherwise requests the four UART
 * GPIOs (TX, RX, CTS, RFR) from platform data, unwinding any
 * already-requested pins on failure.
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int msm_hs_config_uart_gpios(struct uart_port *uport)
{
	struct platform_device *pdev = to_platform_device(uport->dev);
	const struct msm_serial_hs_platform_data *pdata =
				pdev->dev.platform_data;
	int ret = 0;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	if (!IS_ERR_OR_NULL(msm_uport->pinctrl)) {
		MSM_HS_DBG("%s(): Using Pinctrl", __func__);
		msm_uport->use_pinctrl = true;
		ret = pinctrl_select_state(msm_uport->pinctrl,
				msm_uport->gpio_state_active);
		if (ret)
			MSM_HS_ERR("%s(): Failed to pinctrl set_state",
				__func__);
		return ret;
	} else if (pdata) {
		/* Fall back to using gpio lib */
		if (gpio_is_valid(pdata->uart_tx_gpio)) {
			ret = gpio_request(pdata->uart_tx_gpio,
							"UART_TX_GPIO");
			if (unlikely(ret)) {
				MSM_HS_ERR("gpio request failed for:%d\n",
					pdata->uart_tx_gpio);
				goto exit_uart_config;
			}
		}

		if (gpio_is_valid(pdata->uart_rx_gpio)) {
			ret = gpio_request(pdata->uart_rx_gpio,
							"UART_RX_GPIO");
			if (unlikely(ret)) {
				MSM_HS_ERR("gpio request failed for:%d\n",
					pdata->uart_rx_gpio);
				goto uart_tx_unconfig;
			}
		}

		if (gpio_is_valid(pdata->uart_cts_gpio)) {
			ret = gpio_request(pdata->uart_cts_gpio,
							"UART_CTS_GPIO");
			if (unlikely(ret)) {
				MSM_HS_ERR("gpio request failed for:%d\n",
					pdata->uart_cts_gpio);
				goto uart_rx_unconfig;
			}
		}

		if (gpio_is_valid(pdata->uart_rfr_gpio)) {
			ret = gpio_request(pdata->uart_rfr_gpio,
							"UART_RFR_GPIO");
			if (unlikely(ret)) {
				MSM_HS_ERR("gpio request failed for:%d\n",
					pdata->uart_rfr_gpio);
				goto uart_cts_unconfig;
			}
		}
	} else {
		MSM_HS_ERR("Pdata is NULL.\n");
		ret = -EINVAL;
	}
	return ret;

	/* Unwind in reverse order of the requests above */
uart_cts_unconfig:
	if (gpio_is_valid(pdata->uart_cts_gpio))
		gpio_free(pdata->uart_cts_gpio);
uart_rx_unconfig:
	if (gpio_is_valid(pdata->uart_rx_gpio))
		gpio_free(pdata->uart_rx_gpio);
uart_tx_unconfig:
	if (gpio_is_valid(pdata->uart_tx_gpio))
		gpio_free(pdata->uart_tx_gpio);
exit_uart_config:
	return ret;
}
2554
2555
2556static void msm_hs_get_pinctrl_configs(struct uart_port *uport)
2557{
2558 struct pinctrl_state *set_state;
2559 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2560
2561 msm_uport->pinctrl = devm_pinctrl_get(uport->dev);
2562 if (IS_ERR_OR_NULL(msm_uport->pinctrl)) {
2563 MSM_HS_DBG("%s(): Pinctrl not defined", __func__);
2564 } else {
2565 MSM_HS_DBG("%s(): Using Pinctrl", __func__);
2566 msm_uport->use_pinctrl = true;
2567
2568 set_state = pinctrl_lookup_state(msm_uport->pinctrl,
2569 PINCTRL_STATE_DEFAULT);
2570 if (IS_ERR_OR_NULL(set_state)) {
2571 dev_err(uport->dev,
2572 "pinctrl lookup failed for default state");
2573 goto pinctrl_fail;
2574 }
2575
2576 MSM_HS_DBG("%s(): Pinctrl state active %p\n", __func__,
2577 set_state);
2578 msm_uport->gpio_state_active = set_state;
2579
2580 set_state = pinctrl_lookup_state(msm_uport->pinctrl,
2581 PINCTRL_STATE_SLEEP);
2582 if (IS_ERR_OR_NULL(set_state)) {
2583 dev_err(uport->dev,
2584 "pinctrl lookup failed for sleep state");
2585 goto pinctrl_fail;
2586 }
2587
2588 MSM_HS_DBG("%s(): Pinctrl state sleep %p\n", __func__,
2589 set_state);
2590 msm_uport->gpio_state_suspend = set_state;
2591 return;
2592 }
2593pinctrl_fail:
2594 msm_uport->pinctrl = NULL;
2595}
2596
/* Called when port is opened.
 * Brings the port fully up: maps the TX circular buffer for DMA,
 * votes for clocks, optionally arms the RX-wakeup irq, configures
 * GPIOs, (re)connects the TX/RX SPS pipes, programs the UARTDM core
 * registers, requests the main irq and starts RX.
 * Returns 0 on success or a negative errno after unwinding.
 */
static int msm_hs_startup(struct uart_port *uport)
{
	int ret;
	int rfr_level;
	unsigned long flags;
	unsigned int data;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct circ_buf *tx_buf = &uport->state->xmit;
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct msm_hs_rx *rx = &msm_uport->rx;
	struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
	struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;

	/* Auto-RFR watermark: leave 16 entries of FIFO headroom */
	rfr_level = uport->fifosize;
	if (rfr_level > 16)
		rfr_level -= 16;

	tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
				      DMA_TO_DEVICE);

	/* turn on uart clk */
	msm_hs_resource_vote(msm_uport);

	if (is_use_low_power_wakeup(msm_uport)) {
		ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
					msm_hs_wakeup_isr,
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
					"msm_hs_wakeup", msm_uport);
		if (unlikely(ret)) {
			MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
				  __func__, ret);
			goto unvote_exit;
		}

		/* Keep the wakeup irq disarmed until clocks go off */
		msm_uport->wakeup.freed = false;
		disable_irq(msm_uport->wakeup.irq);
		msm_uport->wakeup.enabled = false;

		ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
		if (unlikely(ret)) {
			MSM_HS_ERR("%s():Err setting wakeup irq\n", __func__);
			goto free_uart_irq;
		}
	}

	ret = msm_hs_config_uart_gpios(uport);
	if (ret) {
		MSM_HS_ERR("Uart GPIO request failed\n");
		goto free_uart_irq;
	}

	msm_hs_write(uport, UART_DM_DMEN, 0);

	/* Connect TX */
	sps_tx_disconnect(msm_uport);
	ret = msm_hs_spsconnect_tx(msm_uport);
	if (ret) {
		MSM_HS_ERR("msm_serial_hs: SPS connect failed for TX");
		goto unconfig_uart_gpios;
	}

	/* Connect RX */
	kthread_flush_worker(&msm_uport->rx.kworker);
	if (rx->flush != FLUSH_SHUTDOWN)
		disconnect_rx_endpoint(msm_uport);
	ret = msm_hs_spsconnect_rx(uport);
	if (ret) {
		MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX");
		goto sps_disconnect_tx;
	}

	/* BAM control register: break/stale/DMRX behavior */
	data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
		UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
		UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
	msm_hs_write(uport, UART_DM_BCR, data);

	/* Set auto RFR Level */
	data = msm_hs_read(uport, UART_DM_MR1);
	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
	data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
	data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
	data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
	msm_hs_write(uport, UART_DM_MR1, data);

	/* Make sure RXSTALE count is non-zero */
	data = msm_hs_read(uport, UART_DM_IPR);
	if (!data) {
		data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
		msm_hs_write(uport, UART_DM_IPR, data);
	}

	/* Assume no flow control, unless termios sets it */
	msm_uport->flow_control = false;
	msm_hs_disable_flow_control(uport, true);


	/* Reset TX, RX, error status, break, stale and CTS state */
	msm_hs_write(uport, UART_DM_CR, RESET_TX);
	msm_hs_write(uport, UART_DM_CR, RESET_RX);
	msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
	msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
	/* Turn on Uart Receiver */
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);

	/* Turn on Uart Transmitter */
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);

	tx->dma_in_flight = false;
	MSM_HS_DBG("%s():desc usage flag 0x%lx", __func__, rx->queued_flag);
	setup_timer(&(tx->tx_timeout_timer),
			tx_timeout_handler,
			(unsigned long) msm_uport);

	/* Enable reading the current CTS, no harm even if CTS is ignored */
	msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;

	/* TXLEV on empty TX fifo */
	msm_hs_write(uport, UART_DM_TFWR, 4);
	/*
	 * Complete all device write related configuration before
	 * queuing RX request. Hence mb() requires here.
	 */
	mb();

	ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
			  "msm_hs_uart", msm_uport);
	if (unlikely(ret)) {
		MSM_HS_ERR("%s():Error %d getting uart irq\n", __func__, ret);
		goto sps_disconnect_rx;
	}


	spin_lock_irqsave(&uport->lock, flags);
	atomic_set(&msm_uport->client_count, 0);
	atomic_set(&msm_uport->client_req_state, 0);
	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
			"%s: Client_Count 0\n", __func__);
	msm_hs_start_rx_locked(uport);

	spin_unlock_irqrestore(&uport->lock, flags);

	msm_hs_resource_unvote(msm_uport);
	return 0;

sps_disconnect_rx:
	sps_disconnect(sps_pipe_handle_rx);
sps_disconnect_tx:
	sps_disconnect(sps_pipe_handle_tx);
unconfig_uart_gpios:
	msm_hs_unconfig_uart_gpios(uport);
free_uart_irq:
	/* NOTE(review): uport->irq is only requested at the end of this
	 * function, yet paths that fail earlier also jump here and call
	 * free_irq() on it — confirm this does not trip a WARN for an
	 * irq that was never requested.
	 */
	free_irq(uport->irq, msm_uport);
unvote_exit:
	msm_hs_resource_unvote(msm_uport);
	MSM_HS_ERR("%s(): Error return\n", __func__);
	return ret;
}
2758
2759/* Initialize tx and rx data structures */
2760static int uartdm_init_port(struct uart_port *uport)
2761{
2762 int ret = 0;
2763 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2764 struct msm_hs_tx *tx = &msm_uport->tx;
2765 struct msm_hs_rx *rx = &msm_uport->rx;
2766
2767 init_waitqueue_head(&rx->wait);
2768 init_waitqueue_head(&tx->wait);
2769 init_waitqueue_head(&msm_uport->bam_disconnect_wait);
2770
2771 /* Init kernel threads for tx and rx */
2772
2773 kthread_init_worker(&rx->kworker);
2774 rx->task = kthread_run(kthread_worker_fn,
2775 &rx->kworker, "msm_serial_hs_%d_rx_work", uport->line);
2776 if (IS_ERR(rx->task)) {
2777 MSM_HS_ERR("%s(): error creating task", __func__);
2778 goto exit_lh_init;
2779 }
2780 kthread_init_work(&rx->kwork, msm_serial_hs_rx_work);
2781
2782 kthread_init_worker(&tx->kworker);
2783 tx->task = kthread_run(kthread_worker_fn,
2784 &tx->kworker, "msm_serial_hs_%d_tx_work", uport->line);
2785 if (IS_ERR(rx->task)) {
2786 MSM_HS_ERR("%s(): error creating task", __func__);
2787 goto exit_lh_init;
2788 }
2789
2790 kthread_init_work(&tx->kwork, msm_serial_hs_tx_work);
2791
2792 rx->buffer = dma_alloc_coherent(uport->dev,
2793 UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
2794 &rx->rbuffer, GFP_KERNEL);
2795 if (!rx->buffer) {
2796 MSM_HS_ERR("%s(): cannot allocate rx->buffer", __func__);
2797 ret = -ENOMEM;
2798 goto exit_lh_init;
2799 }
2800
2801 /* Set up Uart Receive */
2802 msm_hs_write(uport, UART_DM_RFWR, 32);
2803 /* Write to BADR explicitly to set up FIFO sizes */
2804 msm_hs_write(uport, UARTDM_BADR_ADDR, 64);
2805
2806 INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
2807
2808 return ret;
2809exit_lh_init:
2810 kthread_stop(rx->task);
2811 rx->task = NULL;
2812 kthread_stop(tx->task);
2813 tx->task = NULL;
2814 return ret;
2815}
2816
/*
 * Build platform data from the device-tree node of @pdev.
 * The four UART GPIOs and the wakeup/OBS flags are optional; the BAM
 * TX/RX endpoint pipe indices are mandatory and range-checked.
 * Returns the allocated pdata or an ERR_PTR() on failure.
 */
struct msm_serial_hs_platform_data
	*msm_hs_dt_to_pdata(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct msm_serial_hs_platform_data *pdata;
	u32 rx_to_inject;
	int ret;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* Device id comes from the "uart" alias when one is defined */
	pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
	/* UART TX GPIO */
	pdata->uart_tx_gpio = of_get_named_gpio(node,
					"qcom,tx-gpio", 0);
	if (pdata->uart_tx_gpio < 0)
		pr_err("uart_tx_gpio is not available\n");

	/* UART RX GPIO */
	pdata->uart_rx_gpio = of_get_named_gpio(node,
					"qcom,rx-gpio", 0);
	if (pdata->uart_rx_gpio < 0)
		pr_err("uart_rx_gpio is not available\n");

	/* UART CTS GPIO */
	pdata->uart_cts_gpio = of_get_named_gpio(node,
					"qcom,cts-gpio", 0);
	if (pdata->uart_cts_gpio < 0)
		pr_err("uart_cts_gpio is not available\n");

	/* UART RFR GPIO */
	pdata->uart_rfr_gpio = of_get_named_gpio(node,
					"qcom,rfr-gpio", 0);
	if (pdata->uart_rfr_gpio < 0)
		pr_err("uart_rfr_gpio is not available\n");

	pdata->no_suspend_delay = of_property_read_bool(node,
				"qcom,no-suspend-delay");

	pdata->obs = of_property_read_bool(node,
				"qcom,msm-obs");
	if (pdata->obs)
		pr_err("%s:Out of Band sleep flag is set\n", __func__);

	pdata->inject_rx_on_wakeup = of_property_read_bool(node,
				"qcom,inject-rx-on-wakeup");

	/* The injected character is mandatory when injection is on */
	if (pdata->inject_rx_on_wakeup) {
		ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
						&rx_to_inject);
		if (ret < 0) {
			pr_err("Error: Rx_char_to_inject not specified.\n");
			return ERR_PTR(ret);
		}
		pdata->rx_to_inject = (u8)rx_to_inject;
	}

	ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
				&pdata->bam_tx_ep_pipe_index);
	if (ret < 0) {
		pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
		return ERR_PTR(ret);
	}

	if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
		pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
		pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
		return ERR_PTR(-EINVAL);
	}

	ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
					&pdata->bam_rx_ep_pipe_index);
	if (ret < 0) {
		pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
		return ERR_PTR(ret);
	}

	if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
		pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
		pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
		return ERR_PTR(-EINVAL);
	}

	pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
		"tx_gpio:%d rx_gpio:%d rfr_gpio:%d cts_gpio:%d",
		pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
		pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
		pdata->uart_rfr_gpio);

	return pdata;
}
2909
2910
2911/**
2912 * Deallocate UART peripheral's SPS endpoint
2913 * @msm_uport - Pointer to msm_hs_port structure
2914 * @ep - Pointer to sps endpoint data structure
2915 */
2916
2917static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
2918 struct msm_hs_sps_ep_conn_data *ep)
2919{
2920 struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
2921 struct sps_connect *sps_config = &ep->config;
2922
2923 dma_free_coherent(msm_uport->uport.dev,
2924 sps_config->desc.size,
2925 &sps_config->desc.phys_base,
2926 GFP_KERNEL);
2927 sps_free_endpoint(sps_pipe_handle);
2928}
2929
2930
/**
 * Allocate UART peripheral's SPS endpoint
 *
 * This function allocates endpoint context
 * by calling appropriate SPS driver APIs.
 *
 * @msm_uport - Pointer to msm_hs_port structure
 * @ep - Pointer to sps endpoint data structure
 * @is_produce - 1 means Producer endpoint
 * - 0 means Consumer endpoint
 *
 * @return - 0 if successful else negative value
 */

static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
				struct msm_hs_sps_ep_conn_data *ep,
				bool is_producer)
{
	int rc = 0;
	struct sps_pipe *sps_pipe_handle;
	struct sps_connect *sps_config = &ep->config;
	struct sps_register_event *sps_event = &ep->event;

	/* Allocate endpoint context */
	sps_pipe_handle = sps_alloc_endpoint();
	if (!sps_pipe_handle) {
		MSM_HS_ERR("%s(): sps_alloc_endpoint() failed!!\n"
			"is_producer=%d", __func__, is_producer);
		rc = -ENOMEM;
		goto out;
	}

	/* Get default connection configuration for an endpoint */
	rc = sps_get_config(sps_pipe_handle, sps_config);
	if (rc) {
		MSM_HS_ERR("%s(): failed! pipe_handle=0x%p rc=%d",
			__func__, sps_pipe_handle, rc);
		goto get_config_err;
	}

	/* Modify the default connection configuration */
	if (is_producer) {
		/* For UART producer transfer, source is UART peripheral
		 * where as destination is system memory
		 */
		sps_config->source = msm_uport->bam_handle;
		sps_config->destination = SPS_DEV_HANDLE_MEM;
		sps_config->mode = SPS_MODE_SRC;
		sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
		sps_config->dest_pipe_index = 0;
		sps_event->callback = msm_hs_sps_rx_callback;
	} else {
		/* For UART consumer transfer, source is system memory
		 * where as destination is UART peripheral
		 */
		sps_config->source = SPS_DEV_HANDLE_MEM;
		sps_config->destination = msm_uport->bam_handle;
		sps_config->mode = SPS_MODE_DEST;
		sps_config->src_pipe_index = 0;
		sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
		sps_event->callback = msm_hs_sps_tx_callback;
	}

	sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
	sps_config->event_thresh = 0x10;

	/* Allocate maximum descriptor fifo size */
	sps_config->desc.size =
		(1 + UART_DMA_DESC_NR) * sizeof(struct sps_iovec);
	/* Freed by msm_hs_exit_ep_conn() */
	sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
						sps_config->desc.size,
						&sps_config->desc.phys_base,
						GFP_KERNEL);
	if (!sps_config->desc.base) {
		rc = -ENOMEM;
		MSM_HS_ERR("msm_serial_hs: dma_alloc_coherent() failed!!\n");
		goto get_config_err;
	}
	memset(sps_config->desc.base, 0x00, sps_config->desc.size);

	sps_event->mode = SPS_TRIGGER_CALLBACK;

	sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
	sps_event->user = (void *)msm_uport;

	/* Now save the sps pipe handle */
	ep->pipe_handle = sps_pipe_handle;
	MSM_HS_DBG("msm_serial_hs: success !! %s: pipe_handle=0x%p\n"
		"desc_fifo.phys_base=0x%pa\n",
		is_producer ? "READ" : "WRITE",
		sps_pipe_handle, &sps_config->desc.phys_base);
	return 0;

get_config_err:
	sps_free_endpoint(sps_pipe_handle);
out:
	return rc;
}
3029
3030/**
3031 * Initialize SPS HW connected with UART core
3032 *
3033 * This function register BAM HW resources with
3034 * SPS driver and then initialize 2 SPS endpoints
3035 *
3036 * msm_uport - Pointer to msm_hs_port structure
3037 *
3038 * @return - 0 if successful else negative value
3039 */
3040
3041static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
3042{
3043 int rc = 0;
3044 struct sps_bam_props bam = {0};
3045 unsigned long bam_handle;
3046
3047 rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
3048 if (rc || !bam_handle) {
3049 bam.phys_addr = msm_uport->bam_mem;
3050 bam.virt_addr = msm_uport->bam_base;
3051 /*
3052 * This event thresold value is only significant for BAM-to-BAM
3053 * transfer. It's ignored for BAM-to-System mode transfer.
3054 */
3055 bam.event_threshold = 0x10; /* Pipe event threshold */
3056 bam.summing_threshold = 1; /* BAM event threshold */
3057
3058 /* SPS driver wll handle the UART BAM IRQ */
3059 bam.irq = (u32)msm_uport->bam_irq;
3060 bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
3061
3062 MSM_HS_DBG("msm_serial_hs: bam physical base=0x%pa\n",
3063 &bam.phys_addr);
3064 MSM_HS_DBG("msm_serial_hs: bam virtual base=0x%p\n",
3065 bam.virt_addr);
3066
3067 /* Register UART Peripheral BAM device to SPS driver */
3068 rc = sps_register_bam_device(&bam, &bam_handle);
3069 if (rc) {
3070 MSM_HS_ERR("%s: BAM device register failed\n",
3071 __func__);
3072 return rc;
3073 }
3074 MSM_HS_DBG("%s:BAM device registered. bam_handle=0x%lx",
3075 __func__, msm_uport->bam_handle);
3076 }
3077 msm_uport->bam_handle = bam_handle;
3078
3079 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
3080 UART_SPS_PROD_PERIPHERAL);
3081 if (rc) {
3082 MSM_HS_ERR("%s: Failed to Init Producer BAM-pipe", __func__);
3083 goto deregister_bam;
3084 }
3085
3086 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
3087 UART_SPS_CONS_PERIPHERAL);
3088 if (rc) {
3089 MSM_HS_ERR("%s: Failed to Init Consumer BAM-pipe", __func__);
3090 goto deinit_ep_conn_prod;
3091 }
3092 return 0;
3093
3094deinit_ep_conn_prod:
3095 msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
3096deregister_bam:
3097 sps_deregister_bam_device(msm_uport->bam_handle);
3098 return rc;
3099}
3100
3101
/* Tracks which ttyHS device numbers are already claimed (true = in use) */
static bool deviceid[UARTDM_NR] = {0};
/*
 * The mutex synchronizes grabbing next free device number
 * both in case of an alias being used or not. When alias is
 * used, the msm_hs_dt_to_pdata gets it and the boolean array
 * is accordingly updated with device_id_set_used. If no alias
 * is used, then device_id_grab_next_free sets that array.
 */
static DEFINE_MUTEX(mutex_next_device_id);
3111
3112static int device_id_grab_next_free(void)
3113{
3114 int i;
3115 int ret = -ENODEV;
3116
3117 mutex_lock(&mutex_next_device_id);
3118 for (i = 0; i < UARTDM_NR; i++)
3119 if (!deviceid[i]) {
3120 ret = i;
3121 deviceid[i] = true;
3122 break;
3123 }
3124 mutex_unlock(&mutex_next_device_id);
3125 return ret;
3126}
3127
3128static int device_id_set_used(int index)
3129{
3130 int ret = 0;
3131
3132 mutex_lock(&mutex_next_device_id);
3133 if (deviceid[index])
3134 ret = -ENODEV;
3135 else
3136 deviceid[index] = true;
3137 mutex_unlock(&mutex_next_device_id);
3138 return ret;
3139}
3140
3141static void obs_manage_irq(struct msm_hs_port *msm_uport, bool en)
3142{
3143 struct uart_port *uport = &(msm_uport->uport);
3144
3145 if (msm_uport->obs) {
3146 if (en)
3147 enable_irq(uport->irq);
3148 else
3149 disable_irq(uport->irq);
3150 }
3151}
3152
/*
 * Power the port down: mark it suspended, turn hardware resources off,
 * drop the clock/bus vote and - unless a client explicitly requested
 * clocks off - arm the RX wakeup interrupt. Shared by the runtime-PM
 * suspend path.
 *
 * NOTE(review): the sequence (state flag -> resource_off -> irq ->
 * unvote) looks deliberate; do not reorder without confirming.
 */
static void msm_hs_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
	int ret;
	int client_count = 0;

	if (!msm_uport)
		goto err_suspend;
	mutex_lock(&msm_uport->mtx);

	client_count = atomic_read(&msm_uport->client_count);
	/* Mark suspended before powering resources down */
	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
	msm_hs_resource_off(msm_uport);
	obs_manage_irq(msm_uport, false);
	msm_hs_clk_bus_unvote(msm_uport);

	/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
	if (msm_uport->obs) {
		ret = pinctrl_select_state(msm_uport->pinctrl,
			msm_uport->gpio_state_suspend);
		if (ret)
			MSM_HS_ERR("%s():Error selecting pinctrl suspend state",
				__func__);
	}

	/* Arm RX wakeup only when no client asked to keep clocks off */
	if (!atomic_read(&msm_uport->client_req_state))
		enable_wakeup_interrupt(msm_uport);
	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
		"%s: PM State Suspended client_count %d\n", __func__,
								client_count);
	mutex_unlock(&msm_uport->mtx);
	return;
err_suspend:
	pr_err("%s(): invalid uport", __func__);
}
3189
/*
 * Power the port back up: disarm the RX wakeup interrupt (unless a
 * client holds a clock-off request), restore active pinctrl for OBS,
 * re-vote clocks/bus and turn resources on. Counterpart of
 * msm_hs_pm_suspend(); shared by the runtime-PM resume path.
 *
 * Returns 0 on success (or if already active), -ENODEV for an invalid
 * port, or the clock/bus vote error code.
 */
static int msm_hs_pm_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
	int ret = 0;
	int client_count = 0;

	if (!msm_uport) {
		dev_err(dev, "%s:Invalid uport\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&msm_uport->mtx);
	client_count = atomic_read(&msm_uport->client_count);
	/* Nothing to do when the port is already powered */
	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
		goto exit_pm_resume;
	if (!atomic_read(&msm_uport->client_req_state))
		disable_wakeup_interrupt(msm_uport);

	/* For OBS, don't use wakeup interrupt, set gpio to active state */
	if (msm_uport->obs) {
		ret = pinctrl_select_state(msm_uport->pinctrl,
			msm_uport->gpio_state_active);
		if (ret)
			MSM_HS_ERR("%s():Error selecting active state",
				 __func__);
	}

	ret = msm_hs_clk_bus_vote(msm_uport);
	if (ret) {
		MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
		dev_err(dev, "%s:Failed clock vote %d\n", __func__, ret);
		goto exit_pm_resume;
	}
	obs_manage_irq(msm_uport, true);
	/* Mark active only after clocks and bus bandwidth are voted */
	msm_uport->pm_state = MSM_HS_PM_ACTIVE;
	msm_hs_resource_on(msm_uport);

	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
		"%s:PM State:Active client_count %d\n", __func__, client_count);
exit_pm_resume:
	mutex_unlock(&msm_uport->mtx);
	return ret;
}
3234
3235#ifdef CONFIG_PM
3236static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
3237{
3238 struct platform_device *pdev = to_platform_device(dev);
3239 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
3240 enum msm_hs_pm_state prev_pwr_state;
3241 int clk_cnt, client_count, ret = 0;
3242
3243 if (IS_ERR_OR_NULL(msm_uport))
3244 return -ENODEV;
3245
3246 mutex_lock(&msm_uport->mtx);
3247
3248 /*
3249 * If there is an active clk request or an impending userspace request
3250 * fail the suspend callback.
3251 */
3252 clk_cnt = atomic_read(&msm_uport->resource_count);
3253 client_count = atomic_read(&msm_uport->client_count);
3254 if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
3255 MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
3256 __func__, clk_cnt, client_count);
3257 ret = -EBUSY;
3258 goto exit_suspend_noirq;
3259 }
3260
3261 prev_pwr_state = msm_uport->pm_state;
3262 msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
3263 LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
3264 "%s:PM State:Sys-Suspended client_count %d\n", __func__,
3265 client_count);
3266exit_suspend_noirq:
3267 mutex_unlock(&msm_uport->mtx);
3268 return ret;
3269};
3270
3271static int msm_hs_pm_sys_resume_noirq(struct device *dev)
3272{
3273 struct platform_device *pdev = to_platform_device(dev);
3274 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
3275
3276 if (IS_ERR_OR_NULL(msm_uport))
3277 return -ENODEV;
3278 /*
3279 * Note system-pm resume and update the state
3280 * variable. Resource activation will be done
3281 * when transfer is requested.
3282 */
3283
3284 mutex_lock(&msm_uport->mtx);
3285 if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
3286 msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
3287 LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
3288 "%s:PM State: Suspended\n", __func__);
3289 mutex_unlock(&msm_uport->mtx);
3290 return 0;
3291}
3292#endif
3293
3294#ifdef CONFIG_PM
3295static void msm_serial_hs_rt_init(struct uart_port *uport)
3296{
3297 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
3298
3299 MSM_HS_INFO("%s(): Enabling runtime pm", __func__);
3300 pm_runtime_set_suspended(uport->dev);
3301 pm_runtime_set_autosuspend_delay(uport->dev, 100);
3302 pm_runtime_use_autosuspend(uport->dev);
3303 mutex_lock(&msm_uport->mtx);
3304 msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
3305 mutex_unlock(&msm_uport->mtx);
3306 pm_runtime_enable(uport->dev);
3307}
3308
/* Runtime-PM suspend hook: delegate to the common suspend path. */
static int msm_hs_runtime_suspend(struct device *dev)
{
	msm_hs_pm_suspend(dev);
	return 0;
}
3314
/* Runtime-PM resume hook: delegate to the common resume path. */
static int msm_hs_runtime_resume(struct device *dev)
{
	return msm_hs_pm_resume(dev);
}
3319#else
3320static void msm_serial_hs_rt_init(struct uart_port *uport) {}
3321static int msm_hs_runtime_suspend(struct device *dev) {}
3322static int msm_hs_runtime_resume(struct device *dev) {}
3323#endif
3324
3325
3326static int msm_hs_probe(struct platform_device *pdev)
3327{
3328 int ret = 0;
3329 struct uart_port *uport;
3330 struct msm_hs_port *msm_uport;
3331 struct resource *core_resource;
3332 struct resource *bam_resource;
3333 int core_irqres, bam_irqres, wakeup_irqres;
3334 struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
3335 unsigned long data;
3336 char name[30];
3337
3338 if (pdev->dev.of_node) {
3339 dev_dbg(&pdev->dev, "device tree enabled\n");
3340 pdata = msm_hs_dt_to_pdata(pdev);
3341 if (IS_ERR(pdata))
3342 return PTR_ERR(pdata);
3343
3344 if (pdev->id < 0) {
3345 pdev->id = device_id_grab_next_free();
3346 if (pdev->id < 0) {
3347 dev_err(&pdev->dev,
3348 "Error grabbing next free device id");
3349 return pdev->id;
3350 }
3351 } else {
3352 ret = device_id_set_used(pdev->id);
3353 if (ret < 0) {
3354 dev_err(&pdev->dev, "%d alias taken",
3355 pdev->id);
3356 return ret;
3357 }
3358 }
3359 pdev->dev.platform_data = pdata;
3360 }
3361
3362 if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
3363 dev_err(&pdev->dev, "Invalid plaform device ID = %d\n",
3364 pdev->id);
3365 return -EINVAL;
3366 }
3367
3368 msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
3369 GFP_KERNEL);
3370 if (!msm_uport)
3371 return -ENOMEM;
3372
3373 msm_uport->uport.type = PORT_UNKNOWN;
3374 uport = &msm_uport->uport;
3375 uport->dev = &pdev->dev;
3376
3377 if (pdev->dev.of_node)
3378 msm_uport->uart_type = BLSP_HSUART;
3379
3380 msm_hs_get_pinctrl_configs(uport);
3381 /* Get required resources for BAM HSUART */
3382 core_resource = platform_get_resource_byname(pdev,
3383 IORESOURCE_MEM, "core_mem");
3384 if (!core_resource) {
3385 dev_err(&pdev->dev, "Invalid core HSUART Resources.\n");
3386 return -ENXIO;
3387 }
3388 bam_resource = platform_get_resource_byname(pdev,
3389 IORESOURCE_MEM, "bam_mem");
3390 if (!bam_resource) {
3391 dev_err(&pdev->dev, "Invalid BAM HSUART Resources.\n");
3392 return -ENXIO;
3393 }
3394 core_irqres = platform_get_irq_byname(pdev, "core_irq");
3395 if (core_irqres < 0) {
3396 dev_err(&pdev->dev, "Error %d, invalid core irq resources.\n",
3397 core_irqres);
3398 return -ENXIO;
3399 }
3400 bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
3401 if (bam_irqres < 0) {
3402 dev_err(&pdev->dev, "Error %d, invalid bam irq resources.\n",
3403 bam_irqres);
3404 return -ENXIO;
3405 }
3406 wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
3407 if (wakeup_irqres < 0) {
3408 wakeup_irqres = -1;
3409 pr_info("Wakeup irq not specified.\n");
3410 }
3411
3412 uport->mapbase = core_resource->start;
3413
3414 uport->membase = ioremap(uport->mapbase,
3415 resource_size(core_resource));
3416 if (unlikely(!uport->membase)) {
3417 dev_err(&pdev->dev, "UART Resource ioremap Failed.\n");
3418 return -ENOMEM;
3419 }
3420 msm_uport->bam_mem = bam_resource->start;
3421 msm_uport->bam_base = ioremap(msm_uport->bam_mem,
3422 resource_size(bam_resource));
3423 if (unlikely(!msm_uport->bam_base)) {
3424 dev_err(&pdev->dev, "UART BAM Resource ioremap Failed.\n");
3425 iounmap(uport->membase);
3426 return -ENOMEM;
3427 }
3428
3429 memset(name, 0, sizeof(name));
3430 scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
3431 "_state");
3432 msm_uport->ipc_msm_hs_log_ctxt =
3433 ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
3434 name, 0);
3435 if (!msm_uport->ipc_msm_hs_log_ctxt) {
3436 dev_err(&pdev->dev, "%s: error creating logging context",
3437 __func__);
3438 } else {
3439 msm_uport->ipc_debug_mask = INFO_LEV;
3440 ret = sysfs_create_file(&pdev->dev.kobj,
3441 &dev_attr_debug_mask.attr);
3442 if (unlikely(ret))
3443 MSM_HS_WARN("%s: Failed to create dev. attr", __func__);
3444 }
3445
3446 uport->irq = core_irqres;
3447 msm_uport->bam_irq = bam_irqres;
3448 pdata->wakeup_irq = wakeup_irqres;
3449
3450 msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3451 if (!msm_uport->bus_scale_table) {
3452 MSM_HS_ERR("BLSP UART: Bus scaling is disabled.\n");
3453 } else {
3454 msm_uport->bus_perf_client =
3455 msm_bus_scale_register_client
3456 (msm_uport->bus_scale_table);
3457 if (IS_ERR(&msm_uport->bus_perf_client)) {
3458 MSM_HS_ERR("%s():Bus client register failed\n",
3459 __func__);
3460 ret = -EINVAL;
3461 goto unmap_memory;
3462 }
3463 }
3464
3465 msm_uport->wakeup.irq = pdata->wakeup_irq;
3466 msm_uport->wakeup.ignore = 1;
3467 msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
3468 msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
3469 msm_uport->obs = pdata->obs;
3470
3471 msm_uport->bam_tx_ep_pipe_index =
3472 pdata->bam_tx_ep_pipe_index;
3473 msm_uport->bam_rx_ep_pipe_index =
3474 pdata->bam_rx_ep_pipe_index;
3475 msm_uport->wakeup.enabled = true;
3476
3477 uport->iotype = UPIO_MEM;
3478 uport->fifosize = 64;
3479 uport->ops = &msm_hs_ops;
3480 uport->flags = UPF_BOOT_AUTOCONF;
3481 uport->uartclk = 7372800;
3482 msm_uport->imr_reg = 0x0;
3483
3484 msm_uport->clk = clk_get(&pdev->dev, "core_clk");
3485 if (IS_ERR(msm_uport->clk)) {
3486 ret = PTR_ERR(msm_uport->clk);
3487 goto deregister_bus_client;
3488 }
3489
3490 msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
3491 /*
3492 * Some configurations do not require explicit pclk control so
3493 * do not flag error on pclk get failure.
3494 */
3495 if (IS_ERR(msm_uport->pclk))
3496 msm_uport->pclk = NULL;
3497
3498 msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
3499 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
3500 if (!msm_uport->hsuart_wq) {
3501 MSM_HS_ERR("%s(): Unable to create workqueue hsuart_wq\n",
3502 __func__);
3503 ret = -ENOMEM;
3504 goto put_clk;
3505 }
3506
3507 mutex_init(&msm_uport->mtx);
3508
3509 /* Initialize SPS HW connected with UART core */
3510 ret = msm_hs_sps_init(msm_uport);
3511 if (unlikely(ret)) {
3512 MSM_HS_ERR("SPS Initialization failed ! err=%d", ret);
3513 goto destroy_mutex;
3514 }
3515
3516 msm_uport->tx.flush = FLUSH_SHUTDOWN;
3517 msm_uport->rx.flush = FLUSH_SHUTDOWN;
3518
3519 memset(name, 0, sizeof(name));
3520 scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
3521 "_tx");
3522 msm_uport->tx.ipc_tx_ctxt =
3523 ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
3524 if (!msm_uport->tx.ipc_tx_ctxt)
3525 dev_err(&pdev->dev, "%s: error creating tx logging context",
3526 __func__);
3527
3528 memset(name, 0, sizeof(name));
3529 scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
3530 "_rx");
3531 msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
3532 IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
3533 if (!msm_uport->rx.ipc_rx_ctxt)
3534 dev_err(&pdev->dev, "%s: error creating rx logging context",
3535 __func__);
3536
3537 memset(name, 0, sizeof(name));
3538 scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
3539 "_pwr");
3540 msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
3541 IPC_MSM_HS_LOG_USER_PAGES, name, 0);
3542 if (!msm_uport->ipc_msm_hs_pwr_ctxt)
3543 dev_err(&pdev->dev, "%s: error creating usr logging context",
3544 __func__);
3545
3546 uport->irq = core_irqres;
3547 msm_uport->bam_irq = bam_irqres;
3548
3549 clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
3550 msm_hs_clk_bus_vote(msm_uport);
3551 ret = uartdm_init_port(uport);
3552 if (unlikely(ret))
3553 goto err_clock;
3554
3555 /* configure the CR Protection to Enable */
3556 msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
3557
3558 /*
3559 * Enable Command register protection before going ahead as this hw
3560 * configuration makes sure that issued cmd to CR register gets complete
3561 * before next issued cmd start. Hence mb() requires here.
3562 */
3563 mb();
3564
3565 /*
3566 * Set RX_BREAK_ZERO_CHAR_OFF and RX_ERROR_CHAR_OFF
3567 * so any rx_break and character having parity of framing
3568 * error don't enter inside UART RX FIFO.
3569 */
3570 data = msm_hs_read(uport, UART_DM_MR2);
3571 data |= (UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF |
3572 UARTDM_MR2_RX_ERROR_CHAR_OFF);
3573 msm_hs_write(uport, UART_DM_MR2, data);
3574 /* Ensure register IO completion */
3575 mb();
3576
3577 ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
3578 if (unlikely(ret)) {
3579 MSM_HS_ERR("Probe Failed as sysfs failed\n");
3580 goto err_clock;
3581 }
3582
3583 msm_serial_debugfs_init(msm_uport, pdev->id);
3584 msm_hs_unconfig_uart_gpios(uport);
3585
3586 uport->line = pdev->id;
3587 if (pdata->userid && pdata->userid <= UARTDM_NR)
3588 uport->line = pdata->userid;
3589 ret = uart_add_one_port(&msm_hs_driver, uport);
3590 if (!ret) {
3591 msm_hs_clk_bus_unvote(msm_uport);
3592 msm_serial_hs_rt_init(uport);
3593 return ret;
3594 }
3595
3596err_clock:
3597 msm_hs_clk_bus_unvote(msm_uport);
3598
3599destroy_mutex:
3600 mutex_destroy(&msm_uport->mtx);
3601 destroy_workqueue(msm_uport->hsuart_wq);
3602
3603put_clk:
3604 if (msm_uport->pclk)
3605 clk_put(msm_uport->pclk);
3606
3607 if (msm_uport->clk)
3608 clk_put(msm_uport->clk);
3609
3610deregister_bus_client:
3611 msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
3612unmap_memory:
3613 iounmap(uport->membase);
3614 iounmap(msm_uport->bam_base);
3615
3616 return ret;
3617}
3618
3619static int __init msm_serial_hs_init(void)
3620{
3621 int ret;
3622
3623 ret = uart_register_driver(&msm_hs_driver);
3624 if (unlikely(ret)) {
3625 pr_err("%s failed to load\n", __func__);
3626 return ret;
3627 }
3628 debug_base = debugfs_create_dir("msm_serial_hs", NULL);
3629 if (IS_ERR_OR_NULL(debug_base))
3630 pr_err("msm_serial_hs: Cannot create debugfs dir\n");
3631
3632 ret = platform_driver_register(&msm_serial_hs_platform_driver);
3633 if (ret) {
3634 pr_err("%s failed to load\n", __func__);
3635 debugfs_remove_recursive(debug_base);
3636 uart_unregister_driver(&msm_hs_driver);
3637 return ret;
3638 }
3639
3640 pr_info("msm_serial_hs module loaded\n");
3641 return ret;
3642}
3643
3644/*
3645 * Called by the upper layer when port is closed.
3646 * - Disables the port
3647 * - Unhook the ISR
3648 */
3649static void msm_hs_shutdown(struct uart_port *uport)
3650{
3651 int ret, rc;
3652 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
3653 struct circ_buf *tx_buf = &uport->state->xmit;
3654 int data;
3655 unsigned long flags;
3656
3657 if (is_use_low_power_wakeup(msm_uport))
3658 irq_set_irq_wake(msm_uport->wakeup.irq, 0);
3659
3660 if (msm_uport->wakeup.enabled)
3661 disable_irq(msm_uport->wakeup.irq);
3662 else
3663 disable_irq(uport->irq);
3664
3665 spin_lock_irqsave(&uport->lock, flags);
3666 msm_uport->wakeup.enabled = false;
3667 msm_uport->wakeup.ignore = 1;
3668 spin_unlock_irqrestore(&uport->lock, flags);
3669
3670 /* Free the interrupt */
3671 free_irq(uport->irq, msm_uport);
3672 if (is_use_low_power_wakeup(msm_uport)) {
3673 free_irq(msm_uport->wakeup.irq, msm_uport);
3674 MSM_HS_DBG("%s(): wakeup irq freed", __func__);
3675 }
3676 msm_uport->wakeup.freed = true;
3677
3678 /* make sure tx lh finishes */
3679 kthread_flush_worker(&msm_uport->tx.kworker);
3680 ret = wait_event_timeout(msm_uport->tx.wait,
3681 uart_circ_empty(tx_buf), 500);
3682 if (!ret)
3683 MSM_HS_WARN("Shutdown called when tx buff not empty");
3684
3685 msm_hs_resource_vote(msm_uport);
3686 /* Stop remote side from sending data */
3687 msm_hs_disable_flow_control(uport, false);
3688 /* make sure rx lh finishes */
3689 kthread_flush_worker(&msm_uport->rx.kworker);
3690
3691 if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
3692 /* disable and disconnect rx */
3693 ret = wait_event_timeout(msm_uport->rx.wait,
3694 !msm_uport->rx.pending_flag, 500);
3695 if (!ret)
3696 MSM_HS_WARN("%s(): rx disconnect not complete",
3697 __func__);
3698 msm_hs_disconnect_rx(uport);
3699 }
3700
3701 cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
3702 flush_workqueue(msm_uport->hsuart_wq);
3703
3704 /* BAM Disconnect for TX */
3705 data = msm_hs_read(uport, UART_DM_DMEN);
3706 data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
3707 msm_hs_write(uport, UART_DM_DMEN, data);
3708 ret = sps_tx_disconnect(msm_uport);
3709 if (ret)
3710 MSM_HS_ERR("%s(): sps_disconnect failed\n",
3711 __func__);
3712 msm_uport->tx.flush = FLUSH_SHUTDOWN;
3713 /* Disable the transmitter */
3714 msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
3715 /* Disable the receiver */
3716 msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
3717
3718 msm_uport->imr_reg = 0;
3719 msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
3720 /*
3721 * Complete all device write before actually disabling uartclk.
3722 * Hence mb() requires here.
3723 */
3724 mb();
3725
3726 msm_uport->rx.buffer_pending = NONE_PENDING;
3727 MSM_HS_DBG("%s(): tx, rx events complete", __func__);
3728
3729 dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
3730 UART_XMIT_SIZE, DMA_TO_DEVICE);
3731
3732 msm_hs_resource_unvote(msm_uport);
3733 rc = atomic_read(&msm_uport->resource_count);
3734 if (rc) {
3735 atomic_set(&msm_uport->resource_count, 1);
3736 MSM_HS_WARN("%s(): removing extra vote\n", __func__);
3737 msm_hs_resource_unvote(msm_uport);
3738 }
3739 if (atomic_read(&msm_uport->client_req_state)) {
3740 MSM_HS_WARN("%s: Client clock vote imbalance\n", __func__);
3741 atomic_set(&msm_uport->client_req_state, 0);
3742 }
3743 if (atomic_read(&msm_uport->client_count)) {
3744 MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
3745 atomic_set(&msm_uport->client_count, 0);
3746 LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
3747 "%s: Client_Count 0\n", __func__);
3748 }
3749 msm_hs_unconfig_uart_gpios(uport);
3750 MSM_HS_INFO("%s:UART port closed successfully\n", __func__);
3751}
3752
/* Module exit: tear down in reverse order of msm_serial_hs_init(). */
static void __exit msm_serial_hs_exit(void)
{
	pr_info("msm_serial_hs module removed\n");
	debugfs_remove_recursive(debug_base);
	platform_driver_unregister(&msm_serial_hs_platform_driver);
	uart_unregister_driver(&msm_hs_driver);
}
3760
/* Runtime-PM and system (noirq phase) PM callbacks for this driver */
static const struct dev_pm_ops msm_hs_dev_pm_ops = {
	.runtime_suspend = msm_hs_runtime_suspend,
	.runtime_resume = msm_hs_runtime_resume,
	.runtime_idle = NULL,
	.suspend_noirq = msm_hs_pm_sys_suspend_noirq,
	.resume_noirq = msm_hs_pm_sys_resume_noirq,
};
3768
/* Platform driver glue; matched by DT (msm_hs_match_table) or by name */
static struct platform_driver msm_serial_hs_platform_driver = {
	.probe = msm_hs_probe,
	.remove = msm_hs_remove,
	.driver = {
		.name = "msm_serial_hs",
		.pm = &msm_hs_dev_pm_ops,
		.of_match_table = msm_hs_match_table,
	},
};
3778
3779static struct uart_driver msm_hs_driver = {
3780 .owner = THIS_MODULE,
3781 .driver_name = "msm_serial_hs",
3782 .dev_name = "ttyHS",
3783 .nr = UARTDM_NR,
3784 .cons = 0,
3785};
3786
/*
 * serial_core operations table. The *_locked suffix on several entries
 * indicates they expect the port lock to be held (presumably by the
 * serial core - confirm against the callers); flush_buffer is
 * intentionally unimplemented.
 */
static const struct uart_ops msm_hs_ops = {
	.tx_empty = msm_hs_tx_empty,
	.set_mctrl = msm_hs_set_mctrl_locked,
	.get_mctrl = msm_hs_get_mctrl_locked,
	.stop_tx = msm_hs_stop_tx_locked,
	.start_tx = msm_hs_start_tx_locked,
	.stop_rx = msm_hs_stop_rx_locked,
	.enable_ms = msm_hs_enable_ms_locked,
	.break_ctl = msm_hs_break_ctl,
	.startup = msm_hs_startup,
	.shutdown = msm_hs_shutdown,
	.set_termios = msm_hs_set_termios,
	.type = msm_hs_type,
	.config_port = msm_hs_config_port,
	.flush_buffer = NULL,
	.ioctl = msm_hs_ioctl,
};
3804
/* Module entry/exit points and metadata */
module_init(msm_serial_hs_init);
module_exit(msm_serial_hs_exit);
MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
MODULE_LICENSE("GPL v2");