blob: a2522c6d781b5e2c0e443dd69da10ee1f7de104f [file] [log] [blame]
Mukesh Kumar Savaliya4305aad2017-09-03 02:09:07 +05301/* drivers/serial/msm_serial_hs.c
2 *
3 * MSM 7k High speed uart driver
4 *
5 * Copyright (c) 2008 Google Inc.
Mukesh Kumar Savaliya8fa1c822018-03-27 00:00:35 +05306 * Copyright (c) 2007-2018, The Linux Foundation. All rights reserved.
Mukesh Kumar Savaliya4305aad2017-09-03 02:09:07 +05307 * Modified: Nick Pelly <npelly@google.com>
8 *
9 * All source code in this file is licensed under the following license
10 * except where indicated.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * version 2 as published by the Free Software Foundation.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19 * See the GNU General Public License for more details.
20 *
21 * Has optional support for uart power management independent of linux
22 * suspend/resume:
23 *
24 * RX wakeup.
25 * UART wakeup can be triggered by RX activity (using a wakeup GPIO on the
26 * UART RX pin). This should only be used if there is not a wakeup
27 * GPIO on the UART CTS, and the first RX byte is known (for example, with the
28 * Bluetooth Texas Instruments HCILL protocol), since the first RX byte will
29 * always be lost. RTS will be asserted even while the UART is off in this mode
30 * of operation. See msm_serial_hs_platform_data.rx_wakeup_irq.
31 */
32
33#include <linux/module.h>
34
35#include <linux/serial.h>
36#include <linux/serial_core.h>
37#include <linux/slab.h>
38#include <linux/init.h>
39#include <linux/interrupt.h>
40#include <linux/irq.h>
41#include <linux/io.h>
42#include <linux/ioport.h>
43#include <linux/atomic.h>
44#include <linux/kernel.h>
45#include <linux/timer.h>
46#include <linux/clk.h>
47#include <linux/delay.h>
48#include <linux/platform_device.h>
49#include <linux/pm_runtime.h>
50#include <linux/dma-mapping.h>
51#include <linux/tty_flip.h>
52#include <linux/wait.h>
53#include <linux/sysfs.h>
54#include <linux/stat.h>
55#include <linux/device.h>
Mukesh Kumar Savaliya4305aad2017-09-03 02:09:07 +053056#include <linux/debugfs.h>
57#include <linux/of.h>
58#include <linux/of_device.h>
59#include <linux/of_gpio.h>
60#include <linux/gpio.h>
61#include <linux/ipc_logging.h>
62#include <asm/irq.h>
63#include <linux/kthread.h>
64
65#include <linux/msm-sps.h>
66#include <linux/platform_data/msm_serial_hs.h>
67#include <linux/msm-bus.h>
68
69#include "msm_serial_hs_hwreg.h"
70#define UART_SPS_CONS_PERIPHERAL 0
71#define UART_SPS_PROD_PERIPHERAL 1
72
73#define IPC_MSM_HS_LOG_STATE_PAGES 2
74#define IPC_MSM_HS_LOG_USER_PAGES 2
75#define IPC_MSM_HS_LOG_DATA_PAGES 3
76#define UART_DMA_DESC_NR 8
77#define BUF_DUMP_SIZE 32
78
/* If the debug_mask gets set to FATAL_LEV,
 * a fatal error has happened and further IPC logging
 * is disabled so that this problem can be detected
 */
enum {
	FATAL_LEV = 0U,		/* logging disabled after a fatal error */
	ERR_LEV = 1U,		/* errors only */
	WARN_LEV = 2U,		/* warnings and errors */
	INFO_LEV = 3U,		/* informational (default level) */
	DBG_LEV = 4U,		/* full debug output */
};

/* Debug-level entry into the port's IPC log; dropped below DBG_LEV.
 * Relies on a local 'msm_uport' being in scope at the call site.
 */
#define MSM_HS_DBG(x...) do { \
	if (msm_uport->ipc_debug_mask >= DBG_LEV) { \
		if (msm_uport->ipc_msm_hs_log_ctxt) \
			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
	} \
} while (0)

/* Info-level entry into the port's IPC log; dropped below INFO_LEV. */
#define MSM_HS_INFO(x...) do { \
	if (msm_uport->ipc_debug_mask >= INFO_LEV) {\
		if (msm_uport->ipc_msm_hs_log_ctxt) \
			ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
	} \
} while (0)

/* warnings and errors show up on console always */
#define MSM_HS_WARN(x...) do { \
	pr_warn(x); \
	if (msm_uport->ipc_msm_hs_log_ctxt && \
			msm_uport->ipc_debug_mask >= WARN_LEV) \
		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
} while (0)

/* ERROR condition in the driver sets the hs_serial_debug_mask
 * to ERR_FATAL level, so that this message can be seen
 * in IPC logging. Further errors continue to log on the console
 */
#define MSM_HS_ERR(x...) do { \
	pr_err(x); \
	if (msm_uport->ipc_msm_hs_log_ctxt && \
			msm_uport->ipc_debug_mask >= ERR_LEV) { \
		ipc_log_string(msm_uport->ipc_msm_hs_log_ctxt, x); \
		msm_uport->ipc_debug_mask = FATAL_LEV; \
	} \
} while (0)

/* Unconditionally log into an explicit IPC context, if one exists. */
#define LOG_USR_MSG(ctx, x...) do { \
	if (ctx) \
		ipc_log_string(ctx, x); \
} while (0)

/*
 * There are 3 different kind of UART Core available on MSM.
 * High Speed UART (i.e. Legacy HSUART), GSBI based HSUART
 * and BSLP based HSUART.
 */
enum uart_core_type {
	LEGACY_HSUART,
	GSBI_HSUART,
	BLSP_HSUART,
};

/* Reason a TX/RX flush was initiated; ordering is significant. */
enum flush_reason {
	FLUSH_NONE,
	FLUSH_DATA_READY,
	FLUSH_DATA_INVALID, /* values after this indicate invalid data */
	FLUSH_IGNORE,
	FLUSH_STOP,
	FLUSH_SHUTDOWN,
};
150
/*
 * SPS data structures to support HSUART with BAM
 * @sps_pipe - This struct defines BAM pipe descriptor
 * @sps_connect - This struct defines a connection's end point
 * @sps_register - This struct defines a event registration parameters
 */
struct msm_hs_sps_ep_conn_data {
	struct sps_pipe *pipe_handle;
	struct sps_connect config;
	struct sps_register_event event;
};

/* TX side state: BAM consumer pipe, kthread worker and flush bookkeeping */
struct msm_hs_tx {
	bool dma_in_flight;    /* tx dma in progress */
	enum flush_reason flush;
	wait_queue_head_t wait;
	int tx_count;
	dma_addr_t dma_base;
	struct kthread_work kwork;
	struct kthread_worker kworker;
	struct task_struct *task;
	struct msm_hs_sps_ep_conn_data cons;
	struct timer_list tx_timeout_timer;
	void *ipc_tx_ctxt;	/* IPC log context for TX data */
};

/* RX side state: BAM producer pipe, descriptor tracking and flush state */
struct msm_hs_rx {
	enum flush_reason flush;
	wait_queue_head_t wait;
	dma_addr_t rbuffer;		/* DMA address of the rx buffer */
	unsigned char *buffer;		/* CPU address of the rx buffer */
	unsigned int buffer_pending;
	struct delayed_work flip_insert_work;
	struct kthread_work kwork;
	struct kthread_worker kworker;
	struct task_struct *task;
	struct msm_hs_sps_ep_conn_data prod;
	unsigned long queued_flag;	/* bitmask of descriptors queued to BAM */
	unsigned long pending_flag;	/* bitmask of descriptors holding data */
	int rx_inx;			/* index of descriptor being serviced */
	struct sps_iovec iovec[UART_DMA_DESC_NR]; /* track descriptors */
	void *ipc_rx_ctxt;		/* IPC log context for RX data */
};

/* State of a pending rx buffer; values are bitwise-combinable flags. */
enum buffer_states {
	NONE_PENDING = 0x0,
	FIFO_OVERRUN = 0x1,
	PARITY_ERROR = 0x2,
	CHARS_NORMAL = 0x4,
};

/* Runtime-PM state of the port. */
enum msm_hs_pm_state {
	MSM_HS_PM_ACTIVE,
	MSM_HS_PM_SUSPENDED,
	MSM_HS_PM_SYS_SUSPENDED,
};

/* optional low power wakeup, typically on a GPIO RX irq */
struct msm_hs_wakeup {
	int irq;  /* < 0 indicates low power wakeup disabled */
	unsigned char ignore;  /* bool */

	/* bool: inject char into rx tty on wakeup */
	bool inject_rx;
	unsigned char rx_to_inject;
	bool enabled;
	bool freed;
};

/* Per-port driver state, embedding the generic uart_port. */
struct msm_hs_port {
	struct uart_port uport;
	unsigned long imr_reg;  /* shadow value of UARTDM_IMR */
	struct clk *clk;	/* core clock */
	struct clk *pclk;	/* interface clock; may be NULL */
	struct msm_hs_tx tx;
	struct msm_hs_rx rx;
	atomic_t resource_count;	/* outstanding runtime-PM votes */
	struct msm_hs_wakeup wakeup;

	struct dentry *loopback_dir;
	struct work_struct clock_off_w; /* work for actual clock off */
	struct workqueue_struct *hsuart_wq; /* hsuart workqueue */
	struct mutex mtx; /* resource access mutex */
	enum uart_core_type uart_type;
	unsigned long bam_handle;
	resource_size_t bam_mem;
	int bam_irq;
	unsigned char __iomem *bam_base;
	unsigned int bam_tx_ep_pipe_index;
	unsigned int bam_rx_ep_pipe_index;
	/* struct sps_event_notify is an argument passed when triggering a
	 * callback event object registered for an SPS connection end point.
	 */
	struct sps_event_notify notify;
	/* bus client handler */
	u32 bus_perf_client;
	/* BLSP UART required BUS Scaling data */
	struct msm_bus_scale_pdata *bus_scale_table;
	bool rx_bam_inprogress;
	wait_queue_head_t bam_disconnect_wait;
	bool use_pinctrl;
	struct pinctrl *pinctrl;
	struct pinctrl_state *gpio_state_active;
	struct pinctrl_state *gpio_state_suspend;
	bool flow_control;
	enum msm_hs_pm_state pm_state;
	atomic_t client_count;
	bool obs; /* out of band sleep flag */
	atomic_t client_req_state;
	void *ipc_msm_hs_log_ctxt;	/* main IPC log context */
	void *ipc_msm_hs_pwr_ctxt;	/* power-event IPC log context */
	int ipc_debug_mask;		/* current verbosity, FATAL_LEV..DBG_LEV */
};
263
264static const struct of_device_id msm_hs_match_table[] = {
265 { .compatible = "qcom,msm-hsuart-v14"},
266 {}
267};
268
269
270#define MSM_UARTDM_BURST_SIZE 16 /* DM burst size (in bytes) */
271#define UARTDM_TX_BUF_SIZE UART_XMIT_SIZE
272#define UARTDM_RX_BUF_SIZE 512
273#define RETRY_TIMEOUT 5
274#define UARTDM_NR 256
275#define BAM_PIPE_MIN 0
276#define BAM_PIPE_MAX 11
277#define BUS_SCALING 1
278#define BUS_RESET 0
279#define RX_FLUSH_COMPLETE_TIMEOUT 300 /* In jiffies */
280#define BLSP_UART_CLK_FMAX 63160000
281
282static struct dentry *debug_base;
283static struct platform_driver msm_serial_hs_platform_driver;
284static struct uart_driver msm_hs_driver;
285static const struct uart_ops msm_hs_ops;
286static void msm_hs_start_rx_locked(struct uart_port *uport);
287static void msm_serial_hs_rx_work(struct kthread_work *work);
288static void flip_insert_work(struct work_struct *work);
289static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote);
290static struct msm_hs_port *msm_hs_get_hs_port(int port_index);
291static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport);
292static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport);
293static int msm_hs_pm_resume(struct device *dev);
294
295#define UARTDM_TO_MSM(uart_port) \
296 container_of((uart_port), struct msm_hs_port, uport)
297
298static int msm_hs_ioctl(struct uart_port *uport, unsigned int cmd,
299 unsigned long arg)
300{
301 int ret = 0, state = 1;
302 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
303
304 if (!msm_uport)
305 return -ENODEV;
306
307 switch (cmd) {
308 case MSM_ENABLE_UART_CLOCK: {
309 ret = msm_hs_request_clock_on(&msm_uport->uport);
310 break;
311 }
312 case MSM_DISABLE_UART_CLOCK: {
313 ret = msm_hs_request_clock_off(&msm_uport->uport);
314 break;
315 }
316 case MSM_GET_UART_CLOCK_STATUS: {
317 /* Return value 0 - UART CLOCK is OFF
318 * Return value 1 - UART CLOCK is ON
319 */
320
321 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
322 state = 0;
323 ret = state;
324 MSM_HS_INFO("%s():GET UART CLOCK STATUS: cmd=%d state=%d\n",
325 __func__, cmd, state);
326 break;
327 }
328 default: {
329 MSM_HS_INFO("%s():Unknown cmd specified: cmd=%d\n", __func__,
330 cmd);
331 ret = -ENOIOCTLCMD;
332 break;
333 }
334 }
335
336 return ret;
337}
338
339/*
340 * This function is called initially during probe and then
341 * through the runtime PM framework. The function directly calls
342 * resource APIs to enable them.
343 */
344
345static int msm_hs_clk_bus_vote(struct msm_hs_port *msm_uport)
346{
347 int rc = 0;
348
349 msm_hs_bus_voting(msm_uport, BUS_SCALING);
350 /* Turn on core clk and iface clk */
351 if (msm_uport->pclk) {
352 rc = clk_prepare_enable(msm_uport->pclk);
353 if (rc) {
354 dev_err(msm_uport->uport.dev,
355 "%s: Could not turn on pclk [%d]\n",
356 __func__, rc);
357 goto busreset;
358 }
359 }
360 rc = clk_prepare_enable(msm_uport->clk);
361 if (rc) {
362 dev_err(msm_uport->uport.dev,
363 "%s: Could not turn on core clk [%d]\n",
364 __func__, rc);
365 goto core_unprepare;
366 }
367 MSM_HS_DBG("%s: Clock ON successful\n", __func__);
368 return rc;
369core_unprepare:
370 clk_disable_unprepare(msm_uport->pclk);
371busreset:
372 msm_hs_bus_voting(msm_uport, BUS_RESET);
373 return rc;
374}
375
376/*
377 * This function is called initially during probe and then
378 * through the runtime PM framework. The function directly calls
379 * resource apis to disable them.
380 */
381static void msm_hs_clk_bus_unvote(struct msm_hs_port *msm_uport)
382{
383 clk_disable_unprepare(msm_uport->clk);
384 if (msm_uport->pclk)
385 clk_disable_unprepare(msm_uport->pclk);
386 msm_hs_bus_voting(msm_uport, BUS_RESET);
387 MSM_HS_DBG("%s: Clock OFF successful\n", __func__);
388}
389
390 /* Remove vote for resources when done */
391static void msm_hs_resource_unvote(struct msm_hs_port *msm_uport)
392{
393 struct uart_port *uport = &(msm_uport->uport);
394 int rc = atomic_read(&msm_uport->resource_count);
395
396 MSM_HS_DBG("%s(): power usage count %d", __func__, rc);
397 if (rc <= 0) {
398 MSM_HS_WARN("%s(): rc zero, bailing\n", __func__);
399 WARN_ON(1);
400 return;
401 }
402 atomic_dec(&msm_uport->resource_count);
403 pm_runtime_mark_last_busy(uport->dev);
404 pm_runtime_put_autosuspend(uport->dev);
405}
406
407 /* Vote for resources before accessing them */
408static void msm_hs_resource_vote(struct msm_hs_port *msm_uport)
409{
410 int ret;
411 struct uart_port *uport = &(msm_uport->uport);
412
413 ret = pm_runtime_get_sync(uport->dev);
414 if (ret < 0 || msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
415 MSM_HS_WARN("%s:%s runtime callback not invoked ret:%d st:%d",
416 __func__, dev_name(uport->dev), ret,
417 msm_uport->pm_state);
418 msm_hs_pm_resume(uport->dev);
419 }
420 atomic_inc(&msm_uport->resource_count);
421}
422
423/* Check if the uport line number matches with user id stored in pdata.
424 * User id information is stored during initialization. This function
425 * ensues that the same device is selected
426 */
427
428static struct msm_hs_port *get_matching_hs_port(struct platform_device *pdev)
429{
430 struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
431 struct msm_hs_port *msm_uport = msm_hs_get_hs_port(pdev->id);
432
433 if ((!msm_uport) || (msm_uport->uport.line != pdev->id
434 && msm_uport->uport.line != pdata->userid)) {
435 pr_err("uport line number mismatch!");
436 WARN_ON(1);
437 return NULL;
438 }
439
440 return msm_uport;
441}
442
443static ssize_t show_clock(struct device *dev, struct device_attribute *attr,
444 char *buf)
445{
446 int state = 1;
447 ssize_t ret = 0;
448 struct platform_device *pdev = container_of(dev, struct
449 platform_device, dev);
450 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
451
452 /* This check should not fail */
453 if (msm_uport) {
454 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
455 state = 0;
456 ret = snprintf(buf, PAGE_SIZE, "%d\n", state);
457 }
458 return ret;
459}
460
461static ssize_t set_clock(struct device *dev, struct device_attribute *attr,
462 const char *buf, size_t count)
463{
464 int state;
465 ssize_t ret = 0;
466 struct platform_device *pdev = container_of(dev, struct
467 platform_device, dev);
468 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
469
470 /* This check should not fail */
471 if (msm_uport) {
472 state = buf[0] - '0';
473 switch (state) {
474 case 0:
475 MSM_HS_DBG("%s: Request clock OFF\n", __func__);
476 msm_hs_request_clock_off(&msm_uport->uport);
477 ret = count;
478 break;
479 case 1:
480 MSM_HS_DBG("%s: Request clock ON\n", __func__);
481 msm_hs_request_clock_on(&msm_uport->uport);
482 ret = count;
483 break;
484 default:
485 ret = -EINVAL;
486 }
487 }
488 return ret;
489}
490
491static DEVICE_ATTR(clock, 0644, show_clock, set_clock);
492
493static ssize_t show_debug_mask(struct device *dev,
494 struct device_attribute *attr, char *buf)
495{
496 ssize_t ret = 0;
497 struct platform_device *pdev = container_of(dev, struct
498 platform_device, dev);
499 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
500
501 /* This check should not fail */
502 if (msm_uport)
503 ret = snprintf(buf, sizeof(int), "%u\n",
504 msm_uport->ipc_debug_mask);
505 return ret;
506}
507
508static ssize_t set_debug_mask(struct device *dev,
509 struct device_attribute *attr,
510 const char *buf, size_t count)
511{
512 struct platform_device *pdev = container_of(dev, struct
513 platform_device, dev);
514 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
515
516 /* This check should not fail */
517 if (msm_uport) {
518 msm_uport->ipc_debug_mask = buf[0] - '0';
519 if (msm_uport->ipc_debug_mask < FATAL_LEV ||
520 msm_uport->ipc_debug_mask > DBG_LEV) {
521 /* set to default level */
522 msm_uport->ipc_debug_mask = INFO_LEV;
523 MSM_HS_ERR("Range is 0 to 4;Set to default level 3\n");
524 return -EINVAL;
525 }
526 }
527 return count;
528}
529
530static DEVICE_ATTR(debug_mask, 0644, show_debug_mask,
531 set_debug_mask);
532
533static inline bool is_use_low_power_wakeup(struct msm_hs_port *msm_uport)
534{
535 return msm_uport->wakeup.irq > 0;
536}
537
538static void msm_hs_bus_voting(struct msm_hs_port *msm_uport, unsigned int vote)
539{
540 int ret;
541
542 if (msm_uport->bus_perf_client) {
543 MSM_HS_DBG("Bus voting:%d\n", vote);
544 ret = msm_bus_scale_client_update_request(
545 msm_uport->bus_perf_client, vote);
546 if (ret)
547 MSM_HS_ERR("%s(): Failed for Bus voting: %d\n",
548 __func__, vote);
549 }
550}
551
/* Read the UARTDM register at byte offset @index (relaxed, no barrier). */
static inline unsigned int msm_hs_read(struct uart_port *uport,
				unsigned int index)
{
	return readl_relaxed(uport->membase + index);
}

/* Write @value to the UARTDM register at byte offset @index (relaxed;
 * callers issue mb() themselves where ordering matters).
 */
static inline void msm_hs_write(struct uart_port *uport, unsigned int index,
				unsigned int value)
{
	writel_relaxed(value, uport->membase + index);
}
563
564static int sps_rx_disconnect(struct sps_pipe *sps_pipe_handler)
565{
566 struct sps_connect config;
567 int ret;
568
569 ret = sps_get_config(sps_pipe_handler, &config);
570 if (ret) {
571 pr_err("%s: sps_get_config() failed ret %d\n", __func__, ret);
572 return ret;
573 }
574 config.options |= SPS_O_POLL;
575 ret = sps_set_config(sps_pipe_handler, &config);
576 if (ret) {
577 pr_err("%s: sps_set_config() failed ret %d\n", __func__, ret);
578 return ret;
579 }
580 return sps_disconnect(sps_pipe_handler);
581}
582
583static void hex_dump_ipc(struct msm_hs_port *msm_uport, void *ipc_ctx,
584 char *prefix, char *string, u64 addr, int size)
585
586{
587 char buf[(BUF_DUMP_SIZE * 3) + 2];
588 int len = 0;
589
590 len = min(size, BUF_DUMP_SIZE);
591 /*
592 * Print upto 32 data bytes, 32 bytes per line, 1 byte at a time and
593 * don't include the ASCII text at the end of the buffer.
594 */
595 hex_dump_to_buffer(string, len, 32, 1, buf, sizeof(buf), false);
596 ipc_log_string(ipc_ctx, "%s[0x%.10x:%d] : %s", prefix,
597 (unsigned int)addr, size, buf);
598}
599
600/*
601 * This API read and provides UART Core registers information.
602 */
/*
 * Log a snapshot of the UARTDM core registers to IPC for debugging.
 * Register reads are only valid while the port is runtime-active, so the
 * function bails out (with an INFO message) when clocks are off.
 */
static void dump_uart_hs_registers(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);

	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_INFO("%s:Failed clocks are off, resource_count %d",
			__func__, atomic_read(&msm_uport->resource_count));
		return;
	}

	/* Mode, watermark, DMA-enable and interrupt configuration */
	MSM_HS_DBG(
	"MR1:%x MR2:%x TFWR:%x RFWR:%x DMEN:%x IMR:%x MISR:%x NCF_TX:%x\n",
	msm_hs_read(uport, UART_DM_MR1),
	msm_hs_read(uport, UART_DM_MR2),
	msm_hs_read(uport, UART_DM_TFWR),
	msm_hs_read(uport, UART_DM_RFWR),
	msm_hs_read(uport, UART_DM_DMEN),
	msm_hs_read(uport, UART_DM_IMR),
	msm_hs_read(uport, UART_DM_MISR),
	msm_hs_read(uport, UART_DM_NCF_TX));
	/* Status, DMA-RX progress and FIFO state */
	MSM_HS_INFO("SR:%x ISR:%x DMRX:%x RX_SNAP:%x TXFS:%x RXFS:%x\n",
	msm_hs_read(uport, UART_DM_SR),
	msm_hs_read(uport, UART_DM_ISR),
	msm_hs_read(uport, UART_DM_DMRX),
	msm_hs_read(uport, UART_DM_RX_TOTAL_SNAP),
	msm_hs_read(uport, UART_DM_TXFS),
	msm_hs_read(uport, UART_DM_RXFS));
	MSM_HS_DBG("rx.flush:%u\n", msm_uport->rx.flush);
}
632
633static int msm_serial_loopback_enable_set(void *data, u64 val)
634{
635 struct msm_hs_port *msm_uport = data;
636 struct uart_port *uport = &(msm_uport->uport);
637 unsigned long flags;
638 int ret = 0;
639
640 msm_hs_resource_vote(msm_uport);
641
642 if (val) {
643 spin_lock_irqsave(&uport->lock, flags);
644 ret = msm_hs_read(uport, UART_DM_MR2);
645 ret |= (UARTDM_MR2_LOOP_MODE_BMSK |
646 UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
647 msm_hs_write(uport, UART_DM_MR2, ret);
648 spin_unlock_irqrestore(&uport->lock, flags);
649 } else {
650 spin_lock_irqsave(&uport->lock, flags);
651 ret = msm_hs_read(uport, UART_DM_MR2);
652 ret &= ~(UARTDM_MR2_LOOP_MODE_BMSK |
653 UARTDM_MR2_RFR_CTS_LOOP_MODE_BMSK);
654 msm_hs_write(uport, UART_DM_MR2, ret);
655 spin_unlock_irqrestore(&uport->lock, flags);
656 }
657 /* Calling CLOCK API. Hence mb() requires here. */
658 mb();
659
660 msm_hs_resource_unvote(msm_uport);
661 return 0;
662}
663
664static int msm_serial_loopback_enable_get(void *data, u64 *val)
665{
666 struct msm_hs_port *msm_uport = data;
667 struct uart_port *uport = &(msm_uport->uport);
668 unsigned long flags;
669 int ret = 0;
670
671 msm_hs_resource_vote(msm_uport);
672
673 spin_lock_irqsave(&uport->lock, flags);
674 ret = msm_hs_read(&msm_uport->uport, UART_DM_MR2);
675 spin_unlock_irqrestore(&uport->lock, flags);
676
677 msm_hs_resource_unvote(msm_uport);
678
679 *val = (ret & UARTDM_MR2_LOOP_MODE_BMSK) ? 1 : 0;
680
681 return 0;
682}
683DEFINE_SIMPLE_ATTRIBUTE(loopback_enable_fops, msm_serial_loopback_enable_get,
684 msm_serial_loopback_enable_set, "%llu\n");
685
686/*
687 * msm_serial_hs debugfs node: <debugfs_root>/msm_serial_hs/loopback.<id>
688 * writing 1 turns on internal loopback mode in HW. Useful for automation
689 * test scripts.
690 * writing 0 disables the internal loopback mode. Default is disabled.
691 */
692static void msm_serial_debugfs_init(struct msm_hs_port *msm_uport,
693 int id)
694{
695 char node_name[15];
696
697 snprintf(node_name, sizeof(node_name), "loopback.%d", id);
698 msm_uport->loopback_dir = debugfs_create_file(node_name,
699 0644,
700 debug_base,
701 msm_uport,
702 &loopback_enable_fops);
703
704 if (IS_ERR_OR_NULL(msm_uport->loopback_dir))
705 MSM_HS_ERR("%s(): Cannot create loopback.%d debug entry",
706 __func__, id);
707}
708
709static int msm_hs_remove(struct platform_device *pdev)
710{
711
712 struct msm_hs_port *msm_uport;
713 struct device *dev;
714
715 if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
716 pr_err("Invalid plaform device ID = %d\n", pdev->id);
717 return -EINVAL;
718 }
719
720 msm_uport = get_matching_hs_port(pdev);
721 if (!msm_uport)
722 return -EINVAL;
723
724 dev = msm_uport->uport.dev;
725 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_clock.attr);
726 sysfs_remove_file(&pdev->dev.kobj, &dev_attr_debug_mask.attr);
727 debugfs_remove(msm_uport->loopback_dir);
728
729 dma_free_coherent(msm_uport->uport.dev,
730 UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
731 msm_uport->rx.buffer, msm_uport->rx.rbuffer);
732
733 msm_uport->rx.buffer = NULL;
734 msm_uport->rx.rbuffer = 0;
735
736 destroy_workqueue(msm_uport->hsuart_wq);
737 mutex_destroy(&msm_uport->mtx);
738
739 uart_remove_one_port(&msm_hs_driver, &msm_uport->uport);
740 clk_put(msm_uport->clk);
741 if (msm_uport->pclk)
742 clk_put(msm_uport->pclk);
743
744 iounmap(msm_uport->uport.membase);
745
746 return 0;
747}
748
749
750/* Connect a UART peripheral's SPS endpoint(consumer endpoint)
751 *
752 * Also registers a SPS callback function for the consumer
753 * process with the SPS driver
754 *
755 * @uport - Pointer to uart uport structure
756 *
757 * @return - 0 if successful else negative value.
758 *
759 */
760
761static int msm_hs_spsconnect_tx(struct msm_hs_port *msm_uport)
762{
763 int ret;
764 struct uart_port *uport = &msm_uport->uport;
765 struct msm_hs_tx *tx = &msm_uport->tx;
766 struct sps_pipe *sps_pipe_handle = tx->cons.pipe_handle;
767 struct sps_connect *sps_config = &tx->cons.config;
768 struct sps_register_event *sps_event = &tx->cons.event;
769 unsigned long flags;
770 unsigned int data;
771
772 if (tx->flush != FLUSH_SHUTDOWN) {
773 MSM_HS_ERR("%s:Invalid flush state:%d\n", __func__, tx->flush);
774 return 0;
775 }
776
777 /* Establish connection between peripheral and memory endpoint */
778 ret = sps_connect(sps_pipe_handle, sps_config);
779 if (ret) {
780 MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
781 "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
782 return ret;
783 }
784 /* Register callback event for EOT (End of transfer) event. */
785 ret = sps_register_event(sps_pipe_handle, sps_event);
786 if (ret) {
787 MSM_HS_ERR("msm_serial_hs: sps_connect() failed for tx!!\n"
788 "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
789 goto reg_event_err;
790 }
791
792 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
793 msm_uport->tx.flush = FLUSH_STOP;
794 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
795
796 data = msm_hs_read(uport, UART_DM_DMEN);
797 /* Enable UARTDM Tx BAM Interface */
798 data |= UARTDM_TX_BAM_ENABLE_BMSK;
799 msm_hs_write(uport, UART_DM_DMEN, data);
800
801 msm_hs_write(uport, UART_DM_CR, RESET_TX);
802 msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
803 msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
804
805 MSM_HS_DBG("%s(): TX Connect", __func__);
806 return 0;
807
808reg_event_err:
809 sps_disconnect(sps_pipe_handle);
810 return ret;
811}
812
813/* Connect a UART peripheral's SPS endpoint(producer endpoint)
814 *
815 * Also registers a SPS callback function for the producer
816 * process with the SPS driver
817 *
818 * @uport - Pointer to uart uport structure
819 *
820 * @return - 0 if successful else negative value.
821 *
822 */
823
824static int msm_hs_spsconnect_rx(struct uart_port *uport)
825{
826 int ret;
827 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
828 struct msm_hs_rx *rx = &msm_uport->rx;
829 struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
830 struct sps_connect *sps_config = &rx->prod.config;
831 struct sps_register_event *sps_event = &rx->prod.event;
832 unsigned long flags;
833
834 /* Establish connection between peripheral and memory endpoint */
835 ret = sps_connect(sps_pipe_handle, sps_config);
836 if (ret) {
837 MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
838 "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
839 return ret;
840 }
841 /* Register callback event for DESC_DONE event. */
842 ret = sps_register_event(sps_pipe_handle, sps_event);
843 if (ret) {
844 MSM_HS_ERR("msm_serial_hs: sps_connect() failed for rx!!\n"
845 "pipe_handle=0x%p ret=%d", sps_pipe_handle, ret);
846 goto reg_event_err;
847 }
848 spin_lock_irqsave(&uport->lock, flags);
849 if (msm_uport->rx.pending_flag)
850 MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
851 __func__, msm_uport->rx.pending_flag);
852 msm_uport->rx.queued_flag = 0;
853 msm_uport->rx.pending_flag = 0;
854 msm_uport->rx.rx_inx = 0;
855 msm_uport->rx.flush = FLUSH_STOP;
856 spin_unlock_irqrestore(&uport->lock, flags);
857 MSM_HS_DBG("%s(): RX Connect\n", __func__);
858 return 0;
859
860reg_event_err:
861 sps_disconnect(sps_pipe_handle);
862 return ret;
863}
864
865/*
866 * programs the UARTDM_CSR register with correct bit rates
867 *
868 * Interrupts should be disabled before we are called, as
869 * we modify Set Baud rate
870 * Set receive stale interrupt level, dependent on Bit Rate
871 * Goal is to have around 8 ms before indicate stale.
872 * roundup (((Bit Rate * .008) / 10) + 1
873 */
874static void msm_hs_set_bps_locked(struct uart_port *uport,
875 unsigned int bps)
876{
877 unsigned long rxstale;
878 unsigned long data;
879 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
880
881 switch (bps) {
882 case 300:
883 msm_hs_write(uport, UART_DM_CSR, 0x00);
884 rxstale = 1;
885 break;
886 case 600:
887 msm_hs_write(uport, UART_DM_CSR, 0x11);
888 rxstale = 1;
889 break;
890 case 1200:
891 msm_hs_write(uport, UART_DM_CSR, 0x22);
892 rxstale = 1;
893 break;
894 case 2400:
895 msm_hs_write(uport, UART_DM_CSR, 0x33);
896 rxstale = 1;
897 break;
898 case 4800:
899 msm_hs_write(uport, UART_DM_CSR, 0x44);
900 rxstale = 1;
901 break;
902 case 9600:
903 msm_hs_write(uport, UART_DM_CSR, 0x55);
904 rxstale = 2;
905 break;
906 case 14400:
907 msm_hs_write(uport, UART_DM_CSR, 0x66);
908 rxstale = 3;
909 break;
910 case 19200:
911 msm_hs_write(uport, UART_DM_CSR, 0x77);
912 rxstale = 4;
913 break;
914 case 28800:
915 msm_hs_write(uport, UART_DM_CSR, 0x88);
916 rxstale = 6;
917 break;
918 case 38400:
919 msm_hs_write(uport, UART_DM_CSR, 0x99);
920 rxstale = 8;
921 break;
922 case 57600:
923 msm_hs_write(uport, UART_DM_CSR, 0xaa);
924 rxstale = 16;
925 break;
926 case 76800:
927 msm_hs_write(uport, UART_DM_CSR, 0xbb);
928 rxstale = 16;
929 break;
930 case 115200:
931 msm_hs_write(uport, UART_DM_CSR, 0xcc);
932 rxstale = 31;
933 break;
934 case 230400:
935 msm_hs_write(uport, UART_DM_CSR, 0xee);
936 rxstale = 31;
937 break;
938 case 460800:
939 msm_hs_write(uport, UART_DM_CSR, 0xff);
940 rxstale = 31;
941 break;
942 case 4000000:
943 case 3686400:
944 case 3200000:
945 case 3500000:
946 case 3000000:
947 case 2500000:
948 case 2000000:
949 case 1500000:
950 case 1152000:
951 case 1000000:
952 case 921600:
953 msm_hs_write(uport, UART_DM_CSR, 0xff);
954 rxstale = 31;
955 break;
956 default:
957 msm_hs_write(uport, UART_DM_CSR, 0xff);
958 /* default to 9600 */
959 bps = 9600;
960 rxstale = 2;
961 break;
962 }
963 /*
964 * uart baud rate depends on CSR and MND Values
965 * we are updating CSR before and then calling
966 * clk_set_rate which updates MND Values. Hence
967 * dsb requires here.
968 */
969 mb();
970 if (bps > 460800) {
971 uport->uartclk = bps * 16;
972 /* BLSP based UART supports maximum clock frequency
973 * of 63.16 Mhz. With this (63.16 Mhz) clock frequency
974 * UART can support baud rate of 3.94 Mbps which is
975 * equivalent to 4 Mbps.
976 * UART hardware is robust enough to handle this
977 * deviation to achieve baud rate ~4 Mbps.
978 */
979 if (bps == 4000000)
980 uport->uartclk = BLSP_UART_CLK_FMAX;
981 } else {
982 uport->uartclk = 7372800;
983 }
984
985 if (clk_set_rate(msm_uport->clk, uport->uartclk)) {
986 MSM_HS_WARN("Error setting clock rate on UART\n");
987 WARN_ON(1);
988 }
989
990 data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
991 data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
992
993 msm_hs_write(uport, UART_DM_IPR, data);
994 /*
995 * It is suggested to do reset of transmitter and receiver after
996 * changing any protocol configuration. Here Baud rate and stale
997 * timeout are getting updated. Hence reset transmitter and receiver.
998 */
999 msm_hs_write(uport, UART_DM_CR, RESET_TX);
1000 msm_hs_write(uport, UART_DM_CR, RESET_RX);
1001}
1002
1003
1004static void msm_hs_set_std_bps_locked(struct uart_port *uport,
1005 unsigned int bps)
1006{
1007 unsigned long rxstale;
1008 unsigned long data;
1009
1010 switch (bps) {
1011 case 9600:
1012 msm_hs_write(uport, UART_DM_CSR, 0x99);
1013 rxstale = 2;
1014 break;
1015 case 14400:
1016 msm_hs_write(uport, UART_DM_CSR, 0xaa);
1017 rxstale = 3;
1018 break;
1019 case 19200:
1020 msm_hs_write(uport, UART_DM_CSR, 0xbb);
1021 rxstale = 4;
1022 break;
1023 case 28800:
1024 msm_hs_write(uport, UART_DM_CSR, 0xcc);
1025 rxstale = 6;
1026 break;
1027 case 38400:
1028 msm_hs_write(uport, UART_DM_CSR, 0xdd);
1029 rxstale = 8;
1030 break;
1031 case 57600:
1032 msm_hs_write(uport, UART_DM_CSR, 0xee);
1033 rxstale = 16;
1034 break;
1035 case 115200:
1036 msm_hs_write(uport, UART_DM_CSR, 0xff);
1037 rxstale = 31;
1038 break;
1039 default:
1040 msm_hs_write(uport, UART_DM_CSR, 0x99);
1041 /* default to 9600 */
1042 bps = 9600;
1043 rxstale = 2;
1044 break;
1045 }
1046
1047 data = rxstale & UARTDM_IPR_STALE_LSB_BMSK;
1048 data |= UARTDM_IPR_STALE_TIMEOUT_MSB_BMSK & (rxstale << 2);
1049
1050 msm_hs_write(uport, UART_DM_IPR, data);
1051}
1052
1053static void msm_hs_enable_flow_control(struct uart_port *uport, bool override)
1054{
1055 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1056 unsigned int data;
1057
1058 if (msm_uport->flow_control || override) {
1059 /* Enable RFR line */
1060 msm_hs_write(uport, UART_DM_CR, RFR_LOW);
1061 /* Enable auto RFR */
1062 data = msm_hs_read(uport, UART_DM_MR1);
1063 data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
1064 msm_hs_write(uport, UART_DM_MR1, data);
1065 /* Ensure register IO completion */
1066 mb();
1067 }
1068}
1069
1070static void msm_hs_disable_flow_control(struct uart_port *uport, bool override)
1071{
1072 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1073 unsigned int data;
1074
1075 /*
1076 * Clear the Rx Ready Ctl bit - This ensures that
1077 * flow control lines stop the other side from sending
1078 * data while we change the parameters
1079 */
1080
1081 if (msm_uport->flow_control || override) {
1082 data = msm_hs_read(uport, UART_DM_MR1);
1083 /* disable auto ready-for-receiving */
1084 data &= ~UARTDM_MR1_RX_RDY_CTL_BMSK;
1085 msm_hs_write(uport, UART_DM_MR1, data);
1086 /* Disable RFR line */
1087 msm_hs_write(uport, UART_DM_CR, RFR_HIGH);
1088 /* Ensure register IO completion */
1089 mb();
1090 }
1091}
1092
1093/*
1094 * termios : new ktermios
1095 * oldtermios: old ktermios previous setting
1096 *
1097 * Configure the serial port
1098 */
static void msm_hs_set_termios(struct uart_port *uport,
				   struct ktermios *termios,
				   struct ktermios *oldtermios)
{
	unsigned int bps;
	unsigned long data;
	unsigned int c_cflag = termios->c_cflag;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/**
	 * set_termios can be invoked from the framework when
	 * the clocks are off and the client has not had a chance
	 * to turn them on. Make sure that they are on
	 */
	msm_hs_resource_vote(msm_uport);
	mutex_lock(&msm_uport->mtx);
	/* Mask all UARTDM interrupts while reconfiguring the port */
	msm_hs_write(uport, UART_DM_IMR, 0);

	/* Force flow control off so the peer stops sending meanwhile */
	msm_hs_disable_flow_control(uport, true);

	/*
	 * Disable Rx channel of UARTDM
	 * DMA Rx Stall happens if enqueue and flush of Rx command happens
	 * concurrently. Hence before changing the baud rate/protocol
	 * configuration and sending flush command to ADM, disable the Rx
	 * channel of UARTDM.
	 * Note: should not reset the receiver here immediately as it is not
	 * suggested to do disable/reset or reset/disable at the same time.
	 */
	data = msm_hs_read(uport, UART_DM_DMEN);
	/* Disable UARTDM RX BAM Interface */
	data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
	msm_hs_write(uport, UART_DM_DMEN, data);

	/*
	 * Reset RX and TX.
	 * Resetting the RX enables it, therefore we must reset and disable.
	 */
	msm_hs_write(uport, UART_DM_CR, RESET_RX);
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);
	msm_hs_write(uport, UART_DM_CR, RESET_TX);

	/*
	 * Request a rate in [200, 4000000]; 200 is a sentinel value that
	 * is remapped to 3.2 Mbps just below, not a real minimum baud.
	 */
	bps = uart_get_baud_rate(uport, termios, oldtermios, 200, 4000000);

	/* Temporary remapping 200 BAUD to 3.2 mbps */
	if (bps == 200)
		bps = 3200000;

	uport->uartclk = clk_get_rate(msm_uport->clk);
	/* Unknown clock rate: fall back to the fixed standard-rate table */
	if (!uport->uartclk)
		msm_hs_set_std_bps_locked(uport, bps);
	else
		msm_hs_set_bps_locked(uport, bps);

	data = msm_hs_read(uport, UART_DM_MR2);
	data &= ~UARTDM_MR2_PARITY_MODE_BMSK;
	/* set parity */
	if (c_cflag & PARENB) {
		if (c_cflag & PARODD)
			data |= ODD_PARITY;
		else if (c_cflag & CMSPAR)
			data |= SPACE_PARITY;
		else
			data |= EVEN_PARITY;
	}

	/* Set bits per char */
	data &= ~UARTDM_MR2_BITS_PER_CHAR_BMSK;

	switch (c_cflag & CSIZE) {
	case CS5:
		data |= FIVE_BPC;
		break;
	case CS6:
		data |= SIX_BPC;
		break;
	case CS7:
		data |= SEVEN_BPC;
		break;
	default:
		data |= EIGHT_BPC;
		break;
	}
	/* stop bits */
	if (c_cflag & CSTOPB) {
		data |= STOP_BIT_TWO;
	} else {
		/* otherwise 1 stop bit */
		data |= STOP_BIT_ONE;
	}
	data |= UARTDM_MR2_ERROR_MODE_BMSK;
	/* write parity/bits per char/stop bit configuration */
	msm_hs_write(uport, UART_DM_MR2, data);

	/* Propagate the iflag error/ignore policy to the serial core masks */
	uport->ignore_status_mask = termios->c_iflag & INPCK;
	uport->ignore_status_mask |= termios->c_iflag & IGNPAR;
	uport->ignore_status_mask |= termios->c_iflag & IGNBRK;

	uport->read_status_mask = (termios->c_cflag & CREAD);

	/* Set Transmit software time out */
	uart_update_timeout(uport, c_cflag, bps);

	/* Enable UARTDM Rx BAM Interface */
	data = msm_hs_read(uport, UART_DM_DMEN);
	data |= UARTDM_RX_BAM_ENABLE_BMSK;
	msm_hs_write(uport, UART_DM_DMEN, data);
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
	/* Issue TX,RX BAM Start IFC command */
	msm_hs_write(uport, UART_DM_CR, START_TX_BAM_IFC);
	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
	/* Ensure Register Writes Complete */
	mb();

	/* Configure HW flow control
	 * UART Core would see status of CTS line when it is sending data
	 * to remote uart to confirm that it can receive or not.
	 * UART Core would trigger RFR if it is not having any space with
	 * RX FIFO.
	 */
	/* Drive RFR low (ready to receive) - RFR_LOW asserts the line */
	msm_hs_write(uport, UART_DM_CR, RFR_LOW);
	data = msm_hs_read(uport, UART_DM_MR1);
	data &= ~(UARTDM_MR1_CTS_CTL_BMSK | UARTDM_MR1_RX_RDY_CTL_BMSK);
	if (c_cflag & CRTSCTS) {
		data |= UARTDM_MR1_CTS_CTL_BMSK;
		data |= UARTDM_MR1_RX_RDY_CTL_BMSK;
		msm_uport->flow_control = true;
	}
	msm_hs_write(uport, UART_DM_MR1, data);
	MSM_HS_INFO("%s: Cflags 0x%x Baud %u\n", __func__, c_cflag, bps);

	mutex_unlock(&msm_uport->mtx);

	msm_hs_resource_unvote(msm_uport);
}
1236
1237/*
1238 * Standard API, Transmitter
1239 * Any character in the transmit shift register is sent
1240 */
1241unsigned int msm_hs_tx_empty(struct uart_port *uport)
1242{
1243 unsigned int data;
1244 unsigned int isr;
1245 unsigned int ret = 0;
1246 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1247
1248 msm_hs_resource_vote(msm_uport);
1249 data = msm_hs_read(uport, UART_DM_SR);
1250 isr = msm_hs_read(uport, UART_DM_ISR);
1251 msm_hs_resource_unvote(msm_uport);
1252 MSM_HS_INFO("%s(): SR:0x%x ISR:0x%x ", __func__, data, isr);
1253
1254 if (data & UARTDM_SR_TXEMT_BMSK) {
1255 ret = TIOCSER_TEMT;
1256 } else
1257 /*
1258 * Add an extra sleep here because sometimes the framework's
1259 * delay (based on baud rate) isn't good enough.
1260 * Note that this won't happen during every port close, only
1261 * on select occassions when the userspace does back to back
1262 * write() and close().
1263 */
1264 usleep_range(5000, 7000);
1265
1266 return ret;
1267}
1268EXPORT_SYMBOL(msm_hs_tx_empty);
1269
1270/*
1271 * Standard API, Stop transmitter.
1272 * Any character in the transmit shift register is sent as
1273 * well as the current data mover transfer .
1274 */
static void msm_hs_stop_tx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;

	/*
	 * Only mark the Tx path as stopping; any in-flight BAM transfer is
	 * left to drain and the Tx worker observes FLUSH_STOP afterwards.
	 */
	tx->flush = FLUSH_STOP;
}
1282
1283static int disconnect_rx_endpoint(struct msm_hs_port *msm_uport)
1284{
1285 struct msm_hs_rx *rx = &msm_uport->rx;
1286 struct sps_pipe *sps_pipe_handle = rx->prod.pipe_handle;
1287 int ret = 0;
1288
1289 ret = sps_rx_disconnect(sps_pipe_handle);
1290
1291 if (msm_uport->rx.pending_flag)
1292 MSM_HS_WARN("%s(): Buffers may be pending 0x%lx",
1293 __func__, msm_uport->rx.pending_flag);
1294 MSM_HS_DBG("%s(): clearing desc usage flag", __func__);
1295 msm_uport->rx.queued_flag = 0;
1296 msm_uport->rx.pending_flag = 0;
1297 msm_uport->rx.rx_inx = 0;
1298
1299 if (ret)
1300 MSM_HS_ERR("%s(): sps_disconnect failed\n", __func__);
1301 msm_uport->rx.flush = FLUSH_SHUTDOWN;
1302 MSM_HS_DBG("%s: Calling Completion\n", __func__);
1303 wake_up(&msm_uport->bam_disconnect_wait);
1304 MSM_HS_DBG("%s: Done Completion\n", __func__);
1305 wake_up(&msm_uport->rx.wait);
1306 return ret;
1307}
1308
1309static int sps_tx_disconnect(struct msm_hs_port *msm_uport)
1310{
1311 struct uart_port *uport = &msm_uport->uport;
1312 struct msm_hs_tx *tx = &msm_uport->tx;
1313 struct sps_pipe *tx_pipe = tx->cons.pipe_handle;
1314 unsigned long flags;
1315 int ret = 0;
1316
1317 if (msm_uport->tx.flush == FLUSH_SHUTDOWN) {
1318 MSM_HS_DBG("%s(): pipe already disonnected", __func__);
1319 return ret;
1320 }
1321
1322 ret = sps_disconnect(tx_pipe);
1323
1324 if (ret) {
1325 MSM_HS_ERR("%s(): sps_disconnect failed %d", __func__, ret);
1326 return ret;
1327 }
1328
1329 spin_lock_irqsave(&uport->lock, flags);
1330 msm_uport->tx.flush = FLUSH_SHUTDOWN;
1331 spin_unlock_irqrestore(&uport->lock, flags);
1332
1333 MSM_HS_DBG("%s(): TX Disconnect", __func__);
1334 return ret;
1335}
1336
1337static void msm_hs_disable_rx(struct uart_port *uport)
1338{
1339 unsigned int data;
1340
1341 data = msm_hs_read(uport, UART_DM_DMEN);
1342 data &= ~UARTDM_RX_BAM_ENABLE_BMSK;
1343 msm_hs_write(uport, UART_DM_DMEN, data);
1344}
1345
1346/*
1347 * Standard API, Stop receiver as soon as possible.
1348 *
1349 * Function immediately terminates the operation of the
1350 * channel receiver and any incoming characters are lost. None
1351 * of the receiver status bits are affected by this command and
1352 * characters that are already in the receive FIFO there.
1353 */
1354static void msm_hs_stop_rx_locked(struct uart_port *uport)
1355{
1356 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1357
1358 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE)
1359 MSM_HS_WARN("%s(): Clocks are off\n", __func__);
1360 else
1361 msm_hs_disable_rx(uport);
1362
1363 if (msm_uport->rx.flush == FLUSH_NONE)
1364 msm_uport->rx.flush = FLUSH_STOP;
1365}
1366
1367static void msm_hs_disconnect_rx(struct uart_port *uport)
1368{
1369 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1370
1371 msm_hs_disable_rx(uport);
1372 /* Disconnect the BAM RX pipe */
1373 if (msm_uport->rx.flush == FLUSH_NONE)
1374 msm_uport->rx.flush = FLUSH_STOP;
1375 disconnect_rx_endpoint(msm_uport);
1376 MSM_HS_DBG("%s(): rx->flush %d", __func__, msm_uport->rx.flush);
1377}
1378
1379/* Tx timeout callback function */
1380void tx_timeout_handler(unsigned long arg)
1381{
1382 struct msm_hs_port *msm_uport = (struct msm_hs_port *) arg;
1383 struct uart_port *uport = &msm_uport->uport;
1384 int isr;
1385
1386 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
1387 MSM_HS_WARN("%s(): clocks are off", __func__);
1388 return;
1389 }
1390
1391 isr = msm_hs_read(uport, UART_DM_ISR);
1392 if (UARTDM_ISR_CURRENT_CTS_BMSK & isr)
1393 MSM_HS_WARN("%s(): CTS Disabled, ISR 0x%x", __func__, isr);
1394 dump_uart_hs_registers(msm_uport);
1395}
1396
1397/* Transmit the next chunk of data */
static void msm_hs_submit_tx_locked(struct uart_port *uport)
{
	int left;
	int tx_count;
	int aligned_tx_count;
	dma_addr_t src_addr;
	dma_addr_t aligned_src_addr;
	u32 flags = SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_INT;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_tx *tx = &msm_uport->tx;
	struct circ_buf *tx_buf = &msm_uport->uport.state->xmit;
	struct sps_pipe *sps_pipe_handle;
	int ret;

	/* Nothing to send (or tty stopped): mark DMA idle and stop Tx */
	if (uart_circ_empty(tx_buf) || uport->state->port.tty->stopped) {
		tx->dma_in_flight = false;
		msm_hs_stop_tx_locked(uport);
		return;
	}

	/* Send at most one Tx buffer's worth per transfer ... */
	tx_count = uart_circ_chars_pending(tx_buf);

	if (tx_count > UARTDM_TX_BUF_SIZE)
		tx_count = UARTDM_TX_BUF_SIZE;

	/* ... and never read past the circ-buffer wrap point */
	left = UART_XMIT_SIZE - tx_buf->tail;

	if (tx_count > left)
		tx_count = left;

	src_addr = tx->dma_base + tx_buf->tail;
	/* Mask the src_addr to align on a cache
	 * and add those bytes to tx_count
	 */
	aligned_src_addr = src_addr & ~(dma_get_cache_alignment() - 1);
	aligned_tx_count = tx_count + src_addr - aligned_src_addr;

	/* Flush CPU caches so the device observes the latest xmit bytes */
	dma_sync_single_for_device(uport->dev, aligned_src_addr,
			aligned_tx_count, DMA_TO_DEVICE);

	tx->tx_count = tx_count;

	hex_dump_ipc(msm_uport, tx->ipc_tx_ctxt, "Tx",
			&tx_buf->buf[tx_buf->tail], (u64)src_addr, tx_count);
	sps_pipe_handle = tx->cons.pipe_handle;

	/* Set 1 second timeout */
	mod_timer(&tx->tx_timeout_timer,
		jiffies + msecs_to_jiffies(MSEC_PER_SEC));
	/* Queue transfer request to SPS */
	ret = sps_transfer_one(sps_pipe_handle, src_addr, tx_count,
				msm_uport, flags);

	MSM_HS_DBG("%s:Enqueue Tx Cmd, ret %d\n", __func__, ret);
}
1453
1454/* This function queues the rx descriptor for BAM transfer */
1455static void msm_hs_post_rx_desc(struct msm_hs_port *msm_uport, int inx)
1456{
1457 u32 flags = SPS_IOVEC_FLAG_INT;
1458 struct msm_hs_rx *rx = &msm_uport->rx;
1459 int ret;
1460
1461 phys_addr_t rbuff_addr = rx->rbuffer + (UARTDM_RX_BUF_SIZE * inx);
1462 u8 *virt_addr = rx->buffer + (UARTDM_RX_BUF_SIZE * inx);
1463
1464 MSM_HS_DBG("%s: %d:Queue desc %d, 0x%llx, base 0x%llx virtaddr %p",
1465 __func__, msm_uport->uport.line, inx,
1466 (u64)rbuff_addr, (u64)rx->rbuffer, virt_addr);
1467
1468 rx->iovec[inx].size = 0;
1469 ret = sps_transfer_one(rx->prod.pipe_handle, rbuff_addr,
1470 UARTDM_RX_BUF_SIZE, msm_uport, flags);
1471
1472 if (ret)
1473 MSM_HS_ERR("Error processing descriptor %d", ret);
1474}
1475
1476/* Update the rx descriptor index to specify the next one to be processed */
1477static void msm_hs_mark_next(struct msm_hs_port *msm_uport, int inx)
1478{
1479 struct msm_hs_rx *rx = &msm_uport->rx;
1480 int prev;
1481
1482 inx %= UART_DMA_DESC_NR;
1483 MSM_HS_DBG("%s(): inx %d, pending 0x%lx", __func__, inx,
1484 rx->pending_flag);
1485
1486 if (!inx)
1487 prev = UART_DMA_DESC_NR - 1;
1488 else
1489 prev = inx - 1;
1490
1491 if (!test_bit(prev, &rx->pending_flag))
1492 msm_uport->rx.rx_inx = inx;
1493 MSM_HS_DBG("%s(): prev %d pending flag 0x%lx, next %d", __func__,
1494 prev, rx->pending_flag, msm_uport->rx.rx_inx);
1495}
1496
1497/*
1498 * Queue the rx descriptor that has just been processed or
1499 * all of them if queueing for the first time
1500 */
1501static void msm_hs_queue_rx_desc(struct msm_hs_port *msm_uport)
1502{
1503 struct msm_hs_rx *rx = &msm_uport->rx;
1504 int i, flag = 0;
1505
1506 /* At first, queue all, if not, queue only one */
1507 if (rx->queued_flag || rx->pending_flag) {
1508 if (!test_bit(rx->rx_inx, &rx->queued_flag) &&
1509 !test_bit(rx->rx_inx, &rx->pending_flag)) {
1510 msm_hs_post_rx_desc(msm_uport, rx->rx_inx);
1511 set_bit(rx->rx_inx, &rx->queued_flag);
1512 MSM_HS_DBG("%s(): Set Queued Bit %d",
1513 __func__, rx->rx_inx);
1514 } else
1515 MSM_HS_ERR("%s(): rx_inx pending or queued", __func__);
1516 return;
1517 }
1518
1519 for (i = 0; i < UART_DMA_DESC_NR; i++) {
1520 if (!test_bit(i, &rx->queued_flag) &&
1521 !test_bit(i, &rx->pending_flag)) {
1522 MSM_HS_DBG("%s(): Calling post rx %d", __func__, i);
1523 msm_hs_post_rx_desc(msm_uport, i);
1524 set_bit(i, &rx->queued_flag);
1525 flag = 1;
1526 }
1527 }
1528
1529 if (!flag)
1530 MSM_HS_ERR("%s(): error queueing descriptor", __func__);
1531}
1532
1533/* Start to receive the next chunk of data */
static void msm_hs_start_rx_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct msm_hs_rx *rx = &msm_uport->rx;
	unsigned int buffer_pending = msm_uport->rx.buffer_pending;
	unsigned int data;

	/* Registers cannot be touched while the port is unclocked */
	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
		return;
	}
	/* Completed descriptors still await the Rx worker; defer restart */
	if (rx->pending_flag) {
		MSM_HS_INFO("%s: Rx Cmd got executed, wait for rx_tlet\n",
			__func__);
		rx->flush = FLUSH_IGNORE;
		return;
	}
	if (buffer_pending)
		MSM_HS_ERR("Error: rx started in buffer state =%x",
			buffer_pending);

	/* Arm the stale-event machinery for a fresh transfer */
	msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
	msm_hs_write(uport, UART_DM_DMRX, UARTDM_RX_BUF_SIZE);
	msm_hs_write(uport, UART_DM_CR, STALE_EVENT_ENABLE);
	/*
	 * Enable UARTDM Rx Interface as previously it has been
	 * disable in set_termios before configuring baud rate.
	 */
	data = msm_hs_read(uport, UART_DM_DMEN);
	/* Enable UARTDM Rx BAM Interface */
	data |= UARTDM_RX_BAM_ENABLE_BMSK;

	msm_hs_write(uport, UART_DM_DMEN, data);
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/* Calling next DMOV API. Hence mb() here. */
	mb();

	/*
	 * RX-transfer will be automatically re-activated
	 * after last data of previous transfer was read.
	 */
	data = (RX_STALE_AUTO_RE_EN | RX_TRANS_AUTO_RE_ACTIVATE |
			RX_DMRX_CYCLIC_EN);
	msm_hs_write(uport, UART_DM_RX_TRANS_CTRL, data);
	/* Issue RX BAM Start IFC command */
	msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
	/* Ensure register IO completion */
	mb();

	msm_uport->rx.flush = FLUSH_NONE;
	/* Hand all free descriptors back to the BAM producer pipe */
	msm_uport->rx_bam_inprogress = true;
	msm_hs_queue_rx_desc(msm_uport);
	msm_uport->rx_bam_inprogress = false;
	wake_up(&msm_uport->rx.wait);
	MSM_HS_DBG("%s:Enqueue Rx Cmd\n", __func__);
}
1590
1591static void flip_insert_work(struct work_struct *work)
1592{
1593 unsigned long flags;
1594 int retval;
1595 struct msm_hs_port *msm_uport =
1596 container_of(work, struct msm_hs_port,
1597 rx.flip_insert_work.work);
1598 struct tty_struct *tty = msm_uport->uport.state->port.tty;
1599
1600 spin_lock_irqsave(&msm_uport->uport.lock, flags);
1601 if (!tty || msm_uport->rx.flush == FLUSH_SHUTDOWN) {
1602 dev_err(msm_uport->uport.dev,
1603 "%s:Invalid driver state flush %d\n",
1604 __func__, msm_uport->rx.flush);
1605 MSM_HS_ERR("%s:Invalid driver state flush %d\n",
1606 __func__, msm_uport->rx.flush);
1607 spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
1608 return;
1609 }
1610
1611 if (msm_uport->rx.buffer_pending == NONE_PENDING) {
1612 MSM_HS_ERR("Error: No buffer pending in %s", __func__);
1613 spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
1614 return;
1615 }
1616 if (msm_uport->rx.buffer_pending & FIFO_OVERRUN) {
1617 retval = tty_insert_flip_char(tty->port, 0, TTY_OVERRUN);
1618 if (retval)
1619 msm_uport->rx.buffer_pending &= ~FIFO_OVERRUN;
1620 }
1621 if (msm_uport->rx.buffer_pending & PARITY_ERROR) {
1622 retval = tty_insert_flip_char(tty->port, 0, TTY_PARITY);
1623 if (retval)
1624 msm_uport->rx.buffer_pending &= ~PARITY_ERROR;
1625 }
1626 if (msm_uport->rx.buffer_pending & CHARS_NORMAL) {
1627 int rx_count, rx_offset;
1628
1629 rx_count = (msm_uport->rx.buffer_pending & 0xFFFF0000) >> 16;
1630 rx_offset = (msm_uport->rx.buffer_pending & 0xFFD0) >> 5;
1631 retval = tty_insert_flip_string(tty->port,
1632 msm_uport->rx.buffer +
1633 (msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)
1634 + rx_offset, rx_count);
1635 msm_uport->rx.buffer_pending &= (FIFO_OVERRUN |
1636 PARITY_ERROR);
1637 if (retval != rx_count)
1638 msm_uport->rx.buffer_pending |= CHARS_NORMAL |
1639 retval << 8 | (rx_count - retval) << 16;
1640 }
1641 if (msm_uport->rx.buffer_pending) {
1642 schedule_delayed_work(&msm_uport->rx.flip_insert_work,
1643 msecs_to_jiffies(RETRY_TIMEOUT));
1644 } else if (msm_uport->rx.flush <= FLUSH_IGNORE) {
1645 MSM_HS_WARN("Pending buffers cleared, restarting");
1646 clear_bit(msm_uport->rx.rx_inx,
1647 &msm_uport->rx.pending_flag);
1648 msm_hs_start_rx_locked(&msm_uport->uport);
1649 msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
1650 }
1651 spin_unlock_irqrestore(&msm_uport->uport.lock, flags);
1652 tty_flip_buffer_push(tty->port);
1653}
1654
/*
 * Rx kthread worker: drains completed BAM descriptors into the tty flip
 * buffer, reports line errors (overrun/parity/break), re-queues consumed
 * descriptors, and defers to flip_insert_work when the tty buffer is full.
 * Runs with the port spinlock held for the bulk of the work.
 */
static void msm_serial_hs_rx_work(struct kthread_work *work)
{
	int retval;
	int rx_count = 0;
	unsigned long status;
	unsigned long flags;
	unsigned int error_f = 0;
	struct uart_port *uport;
	struct msm_hs_port *msm_uport;
	unsigned int flush = FLUSH_DATA_INVALID;
	struct tty_struct *tty;
	struct sps_event_notify *notify;
	struct msm_hs_rx *rx;
	struct sps_pipe *sps_pipe_handle;
	struct platform_device *pdev;
	/* NOTE(review): pdata is assigned below but never used here */
	const struct msm_serial_hs_platform_data *pdata;

	msm_uport = container_of((struct kthread_work *) work,
				 struct msm_hs_port, rx.kwork);
	msm_hs_resource_vote(msm_uport);
	uport = &msm_uport->uport;
	tty = uport->state->port.tty;
	notify = &msm_uport->notify;
	rx = &msm_uport->rx;
	pdev = to_platform_device(uport->dev);
	pdata = pdev->dev.platform_data;

	spin_lock_irqsave(&uport->lock, flags);

	/* Port being torn down: nothing to drain */
	if (!tty || rx->flush == FLUSH_SHUTDOWN) {
		dev_err(uport->dev, "%s:Invalid driver state flush %d\n",
			__func__, rx->flush);
		MSM_HS_ERR("%s:Invalid driver state flush %d\n",
			__func__, rx->flush);
		spin_unlock_irqrestore(&uport->lock, flags);
		msm_hs_resource_unvote(msm_uport);
		return;
	}

	/*
	 * Process all pending descs or if nothing is
	 * queued - called from termios
	 */
	while (!rx->buffer_pending &&
		(rx->pending_flag || !rx->queued_flag)) {
		MSM_HS_DBG("%s(): Loop P 0x%lx Q 0x%lx", __func__,
			rx->pending_flag, rx->queued_flag);

		status = msm_hs_read(uport, UART_DM_SR);

		MSM_HS_DBG("In %s\n", __func__);

		/* overflow is not connect to data in a FIFO */
		if (unlikely((status & UARTDM_SR_OVERRUN_BMSK) &&
			     (uport->read_status_mask & CREAD))) {
			retval = tty_insert_flip_char(tty->port,
							0, TTY_OVERRUN);
			MSM_HS_WARN("%s(): RX Buffer Overrun Detected\n",
				__func__);
			/*
			 * NOTE(review): tty flag constants (TTY_OVERRUN etc.)
			 * are OR-ed into the driver's pending bitmask here -
			 * confirm they do not collide with FIFO_OVERRUN /
			 * PARITY_ERROR / CHARS_NORMAL definitions.
			 */
			if (!retval)
				msm_uport->rx.buffer_pending |= TTY_OVERRUN;
			uport->icount.buf_overrun++;
			error_f = 1;
		}

		/* Drop parity/framing status if INPCK is not requested */
		if (!(uport->ignore_status_mask & INPCK))
			status = status & ~(UARTDM_SR_PAR_FRAME_BMSK);

		if (unlikely(status & UARTDM_SR_PAR_FRAME_BMSK)) {
			/* Can not tell diff between parity & frame error */
			MSM_HS_WARN("msm_serial_hs: parity error\n");
			uport->icount.parity++;
			error_f = 1;
			if (!(uport->ignore_status_mask & IGNPAR)) {
				retval = tty_insert_flip_char(tty->port,
								0, TTY_PARITY);
				if (!retval)
					msm_uport->rx.buffer_pending
							|= TTY_PARITY;
			}
		}

		if (unlikely(status & UARTDM_SR_RX_BREAK_BMSK)) {
			MSM_HS_DBG("msm_serial_hs: Rx break\n");
			uport->icount.brk++;
			error_f = 1;
			if (!(uport->ignore_status_mask & IGNBRK)) {
				retval = tty_insert_flip_char(tty->port,
								0, TTY_BREAK);
				if (!retval)
					msm_uport->rx.buffer_pending
							|= TTY_BREAK;
			}
		}

		/* Clear latched error status so the next pass starts clean */
		if (error_f)
			msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
		flush = msm_uport->rx.flush;
		/* A restart was requested while we were draining */
		if (flush == FLUSH_IGNORE)
			if (!msm_uport->rx.buffer_pending) {
				MSM_HS_DBG("%s: calling start_rx_locked\n",
					__func__);
				msm_hs_start_rx_locked(uport);
			}
		if (flush >= FLUSH_DATA_INVALID)
			goto out;

		/* Size of the descriptor the BAM just completed */
		rx_count = msm_uport->rx.iovec[msm_uport->rx.rx_inx].size;
		hex_dump_ipc(msm_uport, rx->ipc_rx_ctxt, "Rx",
			(msm_uport->rx.buffer +
			(msm_uport->rx.rx_inx * UARTDM_RX_BUF_SIZE)),
			msm_uport->rx.iovec[msm_uport->rx.rx_inx].addr,
			rx_count);

		/*
		 * We are in a spin locked context, spin lock taken at
		 * other places where these flags are updated
		 */
		if (0 != (uport->read_status_mask & CREAD)) {
			if (!test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag) &&
			    !test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.queued_flag))
				MSM_HS_ERR("%s: RX INX not set", __func__);
			else if (test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag) &&
				!test_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.queued_flag)) {
				MSM_HS_DBG("%s(): Clear Pending Bit %d",
					__func__, msm_uport->rx.rx_inx);

				retval = tty_insert_flip_string(tty->port,
					msm_uport->rx.buffer +
					(msm_uport->rx.rx_inx *
					UARTDM_RX_BUF_SIZE),
					rx_count);

				/*
				 * Partial insert: stash resume offset
				 * (bits 5..15) and remaining count
				 * (bits 16..31) for flip_insert_work.
				 */
				if (retval != rx_count) {
					MSM_HS_INFO("%s(): ret %d rx_count %d",
						__func__, retval, rx_count);
					msm_uport->rx.buffer_pending |=
					CHARS_NORMAL | retval << 5 |
					(rx_count - retval) << 16;
				}
			} else
				MSM_HS_ERR("%s: Error in inx %d", __func__,
					msm_uport->rx.rx_inx);
		}

		/* Descriptor fully consumed: recycle it to the BAM */
		if (!msm_uport->rx.buffer_pending) {
			msm_uport->rx.flush = FLUSH_NONE;
			msm_uport->rx_bam_inprogress = true;
			sps_pipe_handle = rx->prod.pipe_handle;
			MSM_HS_DBG("Queing bam descriptor\n");
			/* Queue transfer request to SPS */
			clear_bit(msm_uport->rx.rx_inx,
				&msm_uport->rx.pending_flag);
			msm_hs_queue_rx_desc(msm_uport);
			msm_hs_mark_next(msm_uport, msm_uport->rx.rx_inx+1);
			msm_hs_write(uport, UART_DM_CR, START_RX_BAM_IFC);
			msm_uport->rx_bam_inprogress = false;
			wake_up(&msm_uport->rx.wait);
		} else
			break;

	}
out:
	if (msm_uport->rx.buffer_pending) {
		MSM_HS_WARN("%s: tty buffer exhausted. Stalling\n", __func__);
		schedule_delayed_work(&msm_uport->rx.flip_insert_work
				, msecs_to_jiffies(RETRY_TIMEOUT));
	}
	/* tty_flip_buffer_push() might call msm_hs_start(), so unlock */
	spin_unlock_irqrestore(&uport->lock, flags);
	if (flush < FLUSH_DATA_INVALID)
		tty_flip_buffer_push(tty->port);
	msm_hs_resource_unvote(msm_uport);
}
1833
1834static void msm_hs_start_tx_locked(struct uart_port *uport)
1835{
1836 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
1837 struct msm_hs_tx *tx = &msm_uport->tx;
1838
1839 /* Bail if transfer in progress */
1840 if (tx->flush < FLUSH_STOP || tx->dma_in_flight) {
1841 MSM_HS_INFO("%s(): retry, flush %d, dma_in_flight %d\n",
1842 __func__, tx->flush, tx->dma_in_flight);
1843 return;
1844 }
1845
1846 if (!tx->dma_in_flight) {
1847 tx->dma_in_flight = true;
1848 kthread_queue_work(&msm_uport->tx.kworker,
1849 &msm_uport->tx.kwork);
1850 }
1851}
1852
1853/**
1854 * Callback notification from SPS driver
1855 *
1856 * This callback function gets triggered called from
1857 * SPS driver when requested SPS data transfer is
1858 * completed.
1859 *
1860 */
1861
1862static void msm_hs_sps_tx_callback(struct sps_event_notify *notify)
1863{
1864 struct msm_hs_port *msm_uport =
1865 (struct msm_hs_port *)
1866 ((struct sps_event_notify *)notify)->user;
1867 phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
1868 notify->data.transfer.iovec.addr);
1869
1870 msm_uport->notify = *notify;
1871 MSM_HS_INFO("tx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
1872 &addr, notify->data.transfer.iovec.size,
1873 notify->data.transfer.iovec.flags);
1874
1875 del_timer(&msm_uport->tx.tx_timeout_timer);
1876 MSM_HS_DBG("%s(): Queue kthread work", __func__);
1877 kthread_queue_work(&msm_uport->tx.kworker, &msm_uport->tx.kwork);
1878}
1879
1880static void msm_serial_hs_tx_work(struct kthread_work *work)
1881{
1882 unsigned long flags;
1883 struct msm_hs_port *msm_uport =
1884 container_of((struct kthread_work *)work,
1885 struct msm_hs_port, tx.kwork);
1886 struct uart_port *uport = &msm_uport->uport;
1887 struct circ_buf *tx_buf = &uport->state->xmit;
1888 struct msm_hs_tx *tx = &msm_uport->tx;
1889
1890 /*
1891 * Do the work buffer related work in BAM
1892 * mode that is equivalent to legacy mode
1893 */
1894 msm_hs_resource_vote(msm_uport);
1895 if (tx->flush >= FLUSH_STOP) {
1896 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
1897 tx->flush = FLUSH_NONE;
1898 MSM_HS_DBG("%s(): calling submit_tx", __func__);
1899 msm_hs_submit_tx_locked(uport);
1900 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
1901 msm_hs_resource_unvote(msm_uport);
1902 return;
1903 }
1904
1905 spin_lock_irqsave(&(msm_uport->uport.lock), flags);
1906 if (!uart_circ_empty(tx_buf))
1907 tx_buf->tail = (tx_buf->tail +
1908 tx->tx_count) & ~UART_XMIT_SIZE;
1909 else
1910 MSM_HS_DBG("%s:circ buffer is empty\n", __func__);
1911
1912 wake_up(&msm_uport->tx.wait);
1913
1914 uport->icount.tx += tx->tx_count;
1915
1916 /*
1917 * Calling to send next chunk of data
1918 * If the circ buffer is empty, we stop
1919 * If the clock off was requested, the clock
1920 * off sequence is kicked off
1921 */
1922 MSM_HS_DBG("%s(): calling submit_tx", __func__);
1923 msm_hs_submit_tx_locked(uport);
1924
1925 if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
1926 uart_write_wakeup(uport);
1927
1928 spin_unlock_irqrestore(&(msm_uport->uport.lock), flags);
1929 msm_hs_resource_unvote(msm_uport);
1930}
1931
1932static void
1933msm_hs_mark_proc_rx_desc(struct msm_hs_port *msm_uport,
1934 struct sps_event_notify *notify)
1935{
1936 struct msm_hs_rx *rx = &msm_uport->rx;
1937 phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
1938 notify->data.transfer.iovec.addr);
1939 /* divide by UARTDM_RX_BUF_SIZE */
1940 int inx = (addr - rx->rbuffer) >> 9;
1941
1942 set_bit(inx, &rx->pending_flag);
1943 clear_bit(inx, &rx->queued_flag);
1944 rx->iovec[inx] = notify->data.transfer.iovec;
1945 MSM_HS_DBG("Clear Q, Set P Bit %d, Q 0x%lx P 0x%lx",
1946 inx, rx->queued_flag, rx->pending_flag);
1947}
1948
1949/**
1950 * Callback notification from SPS driver
1951 *
1952 * This callback function gets triggered called from
1953 * SPS driver when requested SPS data transfer is
1954 * completed.
1955 *
1956 */
1957
1958static void msm_hs_sps_rx_callback(struct sps_event_notify *notify)
1959{
1960
1961 struct msm_hs_port *msm_uport =
1962 (struct msm_hs_port *)
1963 ((struct sps_event_notify *)notify)->user;
1964 struct uart_port *uport;
1965 unsigned long flags;
1966 struct msm_hs_rx *rx = &msm_uport->rx;
1967 phys_addr_t addr = DESC_FULL_ADDR(notify->data.transfer.iovec.flags,
1968 notify->data.transfer.iovec.addr);
1969 /* divide by UARTDM_RX_BUF_SIZE */
1970 int inx = (addr - rx->rbuffer) >> 9;
1971
1972 uport = &(msm_uport->uport);
1973 msm_uport->notify = *notify;
1974 MSM_HS_INFO("rx_cb: addr=0x%pa, size=0x%x, flags=0x%x\n",
1975 &addr, notify->data.transfer.iovec.size,
1976 notify->data.transfer.iovec.flags);
1977
1978 spin_lock_irqsave(&uport->lock, flags);
1979 msm_hs_mark_proc_rx_desc(msm_uport, notify);
1980 spin_unlock_irqrestore(&uport->lock, flags);
1981
1982 if (msm_uport->rx.flush == FLUSH_NONE) {
1983 /* Test if others are queued */
1984 if (msm_uport->rx.pending_flag & ~(1 << inx)) {
1985 MSM_HS_DBG("%s(): inx 0x%x, 0x%lx not processed",
1986 __func__, inx,
1987 msm_uport->rx.pending_flag & ~(1<<inx));
1988 }
1989 kthread_queue_work(&msm_uport->rx.kworker,
1990 &msm_uport->rx.kwork);
1991 MSM_HS_DBG("%s(): Scheduled rx_tlet", __func__);
1992 }
1993}
1994
1995/*
1996 * Standard API, Current states of modem control inputs
1997 *
1998 * Since CTS can be handled entirely by HARDWARE we always
1999 * indicate clear to send and count on the TX FIFO to block when
2000 * it fills up.
2001 *
2002 * - TIOCM_DCD
2003 * - TIOCM_CTS
2004 * - TIOCM_DSR
2005 * - TIOCM_RI
2006 * (Unsupported) DCD and DSR will return them high. RI will return low.
2007 */
static unsigned int msm_hs_get_mctrl_locked(struct uart_port *uport)
{
	/* CTS is handled in hardware; report DSR/DCD/CTS asserted, RI low */
	return TIOCM_DSR | TIOCM_CAR | TIOCM_CTS;
}
2012
2013/*
2014 * Standard API, Set or clear RFR_signal
2015 *
2016 * Set RFR high, (Indicate we are not ready for data), we disable auto
2017 * ready for receiving and then set RFR_N high. To set RFR to low we just turn
2018 * back auto ready for receiving and it should lower RFR signal
2019 * when hardware is ready
2020 */
2021void msm_hs_set_mctrl_locked(struct uart_port *uport,
2022 unsigned int mctrl)
2023{
2024 unsigned int set_rts;
2025 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2026
2027 if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
2028 MSM_HS_WARN("%s(): Clocks are off\n", __func__);
2029 return;
2030 }
2031 /* RTS is active low */
2032 set_rts = TIOCM_RTS & mctrl ? 0 : 1;
2033 MSM_HS_INFO("%s: set_rts %d\n", __func__, set_rts);
2034
2035 if (set_rts)
2036 msm_hs_disable_flow_control(uport, false);
2037 else
2038 msm_hs_enable_flow_control(uport, false);
2039}
2040
2041void msm_hs_set_mctrl(struct uart_port *uport,
2042 unsigned int mctrl)
2043{
2044 unsigned long flags;
2045 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2046
2047 msm_hs_resource_vote(msm_uport);
2048 spin_lock_irqsave(&uport->lock, flags);
2049 msm_hs_set_mctrl_locked(uport, mctrl);
2050 spin_unlock_irqrestore(&uport->lock, flags);
2051 msm_hs_resource_unvote(msm_uport);
2052}
2053EXPORT_SYMBOL(msm_hs_set_mctrl);
2054
2055/* Standard API, Enable modem status (CTS) interrupt */
static void msm_hs_enable_ms_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Register writes need the clocks on; caller holds the port lock */
	if (msm_uport->pm_state != MSM_HS_PM_ACTIVE) {
		MSM_HS_WARN("%s(): Clocks are off\n", __func__);
		return;
	}

	/* Enable DELTA_CTS Interrupt */
	msm_uport->imr_reg |= UARTDM_ISR_DELTA_CTS_BMSK;
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/* Ensure register IO completion */
	mb();

}
2072
2073/*
2074 * Standard API, Break Signal
2075 *
 * Control the transmission of a break signal: ctl == 0 terminates the
 * break signal, ctl != 0 starts the break signal
2078 */
static void msm_hs_break_ctl(struct uart_port *uport, int ctl)
{
	unsigned long flags;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Keep the clocks voted and the port locked around the CR write */
	msm_hs_resource_vote(msm_uport);
	spin_lock_irqsave(&uport->lock, flags);
	msm_hs_write(uport, UART_DM_CR, ctl ? START_BREAK : STOP_BREAK);
	/* Ensure register IO completion */
	mb();
	spin_unlock_irqrestore(&uport->lock, flags);
	msm_hs_resource_unvote(msm_uport);
}
2092
2093static void msm_hs_config_port(struct uart_port *uport, int cfg_flags)
2094{
2095 if (cfg_flags & UART_CONFIG_TYPE)
2096 uport->type = PORT_MSM;
2097
2098}
2099
2100/* Handle CTS changes (Called from interrupt handler) */
static void msm_hs_handle_delta_cts_locked(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/*
	 * Caller (msm_hs_isr) holds uport->lock.
	 * NOTE(review): msm_hs_resource_vote() is invoked in interrupt
	 * context here - confirm it cannot sleep on this path.
	 */
	msm_hs_resource_vote(msm_uport);
	/* clear interrupt */
	msm_hs_write(uport, UART_DM_CR, RESET_CTS);
	/* Calling CLOCK API. Hence mb() requires here. */
	mb();
	uport->icount.cts++;

	/* clear the IOCTL TIOCMIWAIT if called */
	wake_up_interruptible(&uport->state->port.delta_msr_wait);
	msm_hs_resource_unvote(msm_uport);
}
2116
2117static irqreturn_t msm_hs_isr(int irq, void *dev)
2118{
2119 unsigned long flags;
2120 unsigned int isr_status;
2121 struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
2122 struct uart_port *uport = &msm_uport->uport;
2123 struct circ_buf *tx_buf = &uport->state->xmit;
2124 struct msm_hs_tx *tx = &msm_uport->tx;
2125
2126 spin_lock_irqsave(&uport->lock, flags);
2127
2128 isr_status = msm_hs_read(uport, UART_DM_MISR);
2129 MSM_HS_INFO("%s: DM_ISR: 0x%x\n", __func__, isr_status);
2130 dump_uart_hs_registers(msm_uport);
2131
2132 /* Uart RX starting */
2133 if (isr_status & UARTDM_ISR_RXLEV_BMSK) {
2134 MSM_HS_DBG("%s:UARTDM_ISR_RXLEV_BMSK\n", __func__);
2135 msm_uport->imr_reg &= ~UARTDM_ISR_RXLEV_BMSK;
2136 msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
2137 /* Complete device write for IMR. Hence mb() requires. */
2138 mb();
2139 }
2140 /* Stale rx interrupt */
2141 if (isr_status & UARTDM_ISR_RXSTALE_BMSK) {
2142 msm_hs_write(uport, UART_DM_CR, STALE_EVENT_DISABLE);
2143 msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
2144 /*
2145 * Complete device write before calling DMOV API. Hence
2146 * mb() requires here.
2147 */
2148 mb();
2149 MSM_HS_DBG("%s:Stal Interrupt\n", __func__);
2150 }
2151 /* tx ready interrupt */
2152 if (isr_status & UARTDM_ISR_TX_READY_BMSK) {
2153 MSM_HS_DBG("%s: ISR_TX_READY Interrupt\n", __func__);
2154 /* Clear TX Ready */
2155 msm_hs_write(uport, UART_DM_CR, CLEAR_TX_READY);
2156
2157 /*
2158 * Complete both writes before starting new TX.
2159 * Hence mb() requires here.
2160 */
2161 mb();
2162 /* Complete DMA TX transactions and submit new transactions */
2163
2164 /* Do not update tx_buf.tail if uart_flush_buffer already
2165 * called in serial core
2166 */
2167 if (!uart_circ_empty(tx_buf))
2168 tx_buf->tail = (tx_buf->tail +
2169 tx->tx_count) & ~UART_XMIT_SIZE;
2170
2171 tx->dma_in_flight = false;
2172
2173 uport->icount.tx += tx->tx_count;
2174
2175 if (uart_circ_chars_pending(tx_buf) < WAKEUP_CHARS)
2176 uart_write_wakeup(uport);
2177 }
2178 if (isr_status & UARTDM_ISR_TXLEV_BMSK) {
2179 /* TX FIFO is empty */
2180 msm_uport->imr_reg &= ~UARTDM_ISR_TXLEV_BMSK;
2181 msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
2182 MSM_HS_DBG("%s: TXLEV Interrupt\n", __func__);
2183 /*
2184 * Complete device write before starting clock_off request.
2185 * Hence mb() requires here.
2186 */
2187 mb();
2188 queue_work(msm_uport->hsuart_wq, &msm_uport->clock_off_w);
2189 }
2190
2191 /* Change in CTS interrupt */
2192 if (isr_status & UARTDM_ISR_DELTA_CTS_BMSK)
2193 msm_hs_handle_delta_cts_locked(uport);
2194
2195 spin_unlock_irqrestore(&uport->lock, flags);
2196
2197 return IRQ_HANDLED;
2198}
2199
2200/* The following two functions provide interfaces to get the underlying
2201 * port structure (struct uart_port or struct msm_hs_port) given
2202 * the port index. msm_hs_get_uart port is called by clients.
2203 * The function msm_hs_get_hs_port is for internal use
2204 */
2205
2206struct uart_port *msm_hs_get_uart_port(int port_index)
2207{
2208 struct uart_state *state = msm_hs_driver.state + port_index;
2209
2210 /* The uart_driver structure stores the states in an array.
2211 * Thus the corresponding offset from the drv->state returns
2212 * the state for the uart_port that is requested
2213 */
2214 if (port_index == state->uart_port->line)
2215 return state->uart_port;
2216
2217 return NULL;
2218}
2219EXPORT_SYMBOL(msm_hs_get_uart_port);
2220
2221static struct msm_hs_port *msm_hs_get_hs_port(int port_index)
2222{
2223 struct uart_port *uport = msm_hs_get_uart_port(port_index);
2224
2225 if (uport)
2226 return UARTDM_TO_MSM(uport);
2227 return NULL;
2228}
2229
2230void enable_wakeup_interrupt(struct msm_hs_port *msm_uport)
2231{
2232 unsigned long flags;
2233 struct uart_port *uport = &(msm_uport->uport);
2234
2235 if (!is_use_low_power_wakeup(msm_uport))
2236 return;
2237 if (msm_uport->wakeup.freed)
2238 return;
2239
2240 if (!(msm_uport->wakeup.enabled)) {
2241 spin_lock_irqsave(&uport->lock, flags);
2242 msm_uport->wakeup.ignore = 1;
2243 msm_uport->wakeup.enabled = true;
2244 spin_unlock_irqrestore(&uport->lock, flags);
2245 disable_irq(uport->irq);
2246 enable_irq(msm_uport->wakeup.irq);
2247 } else {
2248 MSM_HS_WARN("%s:Wake up IRQ already enabled", __func__);
2249 }
2250}
2251
2252void disable_wakeup_interrupt(struct msm_hs_port *msm_uport)
2253{
2254 unsigned long flags;
2255 struct uart_port *uport = &(msm_uport->uport);
2256
2257 if (!is_use_low_power_wakeup(msm_uport))
2258 return;
2259 if (msm_uport->wakeup.freed)
2260 return;
2261
2262 if (msm_uport->wakeup.enabled) {
2263 disable_irq_nosync(msm_uport->wakeup.irq);
2264 enable_irq(uport->irq);
2265 spin_lock_irqsave(&uport->lock, flags);
2266 msm_uport->wakeup.enabled = false;
2267 spin_unlock_irqrestore(&uport->lock, flags);
2268 } else {
2269 MSM_HS_WARN("%s:Wake up IRQ already disabled", __func__);
2270 }
2271}
2272
void msm_hs_resource_off(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);
	unsigned int data;

	MSM_HS_DBG("%s(): begin", __func__);
	/* Quiesce the link before tearing down the DMA pipes */
	msm_hs_disable_flow_control(uport, false);
	if (msm_uport->rx.flush == FLUSH_NONE)
		msm_hs_disconnect_rx(uport);

	/* disable dlink */
	if (msm_uport->tx.flush == FLUSH_NONE)
		/* Wait (timeout in jiffies) for in-flight TX to reach FLUSH_STOP */
		wait_event_timeout(msm_uport->tx.wait,
				msm_uport->tx.flush == FLUSH_STOP, 500);

	if (msm_uport->tx.flush != FLUSH_SHUTDOWN) {
		/* Clear the TX BAM enable bit, then drop the SPS TX pipe */
		data = msm_hs_read(uport, UART_DM_DMEN);
		data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
		msm_hs_write(uport, UART_DM_DMEN, data);
		sps_tx_disconnect(msm_uport);
	}
	/* Restore flow control unless a client explicitly requested clock-off */
	if (!atomic_read(&msm_uport->client_req_state))
		msm_hs_enable_flow_control(uport, false);
}
2297
void msm_hs_resource_on(struct msm_hs_port *msm_uport)
{
	struct uart_port *uport = &(msm_uport->uport);
	unsigned int data;
	unsigned long flags;

	/* Re-enable the RX BAM path if it was stopped or shut down */
	if (msm_uport->rx.flush == FLUSH_SHUTDOWN ||
	    msm_uport->rx.flush == FLUSH_STOP) {
		msm_hs_write(uport, UART_DM_CR, RESET_RX);
		data = msm_hs_read(uport, UART_DM_DMEN);
		data |= UARTDM_RX_BAM_ENABLE_BMSK;
		msm_hs_write(uport, UART_DM_DMEN, data);
	}

	/* Reconnect the SPS TX pipe; restart RX only after a full shutdown */
	msm_hs_spsconnect_tx(msm_uport);
	if (msm_uport->rx.flush == FLUSH_SHUTDOWN) {
		msm_hs_spsconnect_rx(uport);
		spin_lock_irqsave(&uport->lock, flags);
		msm_hs_start_rx_locked(uport);
		spin_unlock_irqrestore(&uport->lock, flags);
	}
}
2320
2321/* Request to turn off uart clock once pending TX is flushed */
/* Request to turn off uart clock once pending TX is flushed */
int msm_hs_request_clock_off(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	int ret = 0;
	int client_count = 0;

	mutex_lock(&msm_uport->mtx);
	/*
	 * If we're in the middle of a system suspend, don't process these
	 * userspace/kernel API commands.
	 */
	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
		MSM_HS_WARN("%s:Can't process clk request during suspend",
			__func__);
		ret = -EIO;
	}
	mutex_unlock(&msm_uport->mtx);
	if (ret)
		goto exit_request_clock_off;

	/* A clock-off request must pair with an earlier clock-on vote */
	if (atomic_read(&msm_uport->client_count) <= 0) {
		MSM_HS_WARN("%s(): ioctl count -ve, client check voting",
			__func__);
		ret = -EPERM;
		goto exit_request_clock_off;
	}
	/* Set the flag to disable flow control and wakeup irq */
	if (msm_uport->obs)
		atomic_set(&msm_uport->client_req_state, 1);
	/* Drop the resource vote, then account for this client */
	msm_hs_resource_unvote(msm_uport);
	atomic_dec(&msm_uport->client_count);
	client_count = atomic_read(&msm_uport->client_count);
	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
			"%s: Client_Count %d\n", __func__,
			client_count);
exit_request_clock_off:
	return ret;
}
EXPORT_SYMBOL(msm_hs_request_clock_off);
2361
2362int msm_hs_request_clock_on(struct uart_port *uport)
2363{
2364 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2365 int client_count;
2366 int ret = 0;
2367
2368 mutex_lock(&msm_uport->mtx);
2369 /*
2370 * If we're in the middle of a system suspend, don't process these
2371 * userspace/kernel API commands.
2372 */
2373 if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED) {
2374 MSM_HS_WARN("%s:Can't process clk request during suspend",
2375 __func__);
2376 ret = -EIO;
2377 }
2378 mutex_unlock(&msm_uport->mtx);
2379 if (ret)
2380 goto exit_request_clock_on;
2381
2382 msm_hs_resource_vote(UARTDM_TO_MSM(uport));
2383 atomic_inc(&msm_uport->client_count);
2384 client_count = atomic_read(&msm_uport->client_count);
2385 LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
2386 "%s: Client_Count %d\n", __func__,
2387 client_count);
2388
2389 /* Clear the flag */
2390 if (msm_uport->obs)
2391 atomic_set(&msm_uport->client_req_state, 0);
2392exit_request_clock_on:
2393 return ret;
2394}
2395EXPORT_SYMBOL(msm_hs_request_clock_on);
2396
static irqreturn_t msm_hs_wakeup_isr(int irq, void *dev)
{
	unsigned int wakeup = 0;
	unsigned long flags;
	struct msm_hs_port *msm_uport = (struct msm_hs_port *)dev;
	struct uart_port *uport = &msm_uport->uport;
	struct tty_struct *tty = NULL;

	spin_lock_irqsave(&uport->lock, flags);

	/* The first event after arming the wakeup IRQ is skipped (ignore) */
	if (msm_uport->wakeup.ignore)
		msm_uport->wakeup.ignore = 0;
	else
		wakeup = 1;

	if (wakeup) {
		/*
		 * Port was clocked off during rx, wake up and
		 * optionally inject char into tty rx
		 */
		if (msm_uport->wakeup.inject_rx) {
			/* NOTE(review): tty is not NULL-checked before use -
			 * confirm the port cannot be closed while this IRQ
			 * is still enabled.
			 */
			tty = uport->state->port.tty;
			tty_insert_flip_char(tty->port,
					msm_uport->wakeup.rx_to_inject,
					TTY_NORMAL);
			hex_dump_ipc(msm_uport, msm_uport->rx.ipc_rx_ctxt,
					"Rx Inject",
					&msm_uport->wakeup.rx_to_inject, 0, 1);
			MSM_HS_INFO("Wakeup ISR.Ignore%d\n",
					msm_uport->wakeup.ignore);
		}
	}

	spin_unlock_irqrestore(&uport->lock, flags);

	/* Push the injected byte to the line discipline outside the lock */
	if (wakeup && msm_uport->wakeup.inject_rx)
		tty_flip_buffer_push(tty->port);
	return IRQ_HANDLED;
}
2436
/* Standard API: identification string reported to the serial core */
static const char *msm_hs_type(struct uart_port *port)
{
	static const char type_name[] = "MSM HS UART";

	return type_name;
}
2441
2442/**
2443 * msm_hs_unconfig_uart_gpios: Unconfigures UART GPIOs
2444 * @uport: uart port
2445 */
2446static void msm_hs_unconfig_uart_gpios(struct uart_port *uport)
2447{
2448 struct platform_device *pdev = to_platform_device(uport->dev);
2449 const struct msm_serial_hs_platform_data *pdata =
2450 pdev->dev.platform_data;
2451 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2452 int ret;
2453
2454 if (msm_uport->use_pinctrl) {
2455 ret = pinctrl_select_state(msm_uport->pinctrl,
2456 msm_uport->gpio_state_suspend);
2457 if (ret)
2458 MSM_HS_ERR("%s():Failed to pinctrl set_state",
2459 __func__);
2460 } else if (pdata) {
2461 if (gpio_is_valid(pdata->uart_tx_gpio))
2462 gpio_free(pdata->uart_tx_gpio);
2463 if (gpio_is_valid(pdata->uart_rx_gpio))
2464 gpio_free(pdata->uart_rx_gpio);
2465 if (gpio_is_valid(pdata->uart_cts_gpio))
2466 gpio_free(pdata->uart_cts_gpio);
2467 if (gpio_is_valid(pdata->uart_rfr_gpio))
2468 gpio_free(pdata->uart_rfr_gpio);
2469 } else
2470 MSM_HS_ERR("Error:Pdata is NULL.\n");
2471}
2472
2473/**
2474 * msm_hs_config_uart_gpios - Configures UART GPIOs
2475 * @uport: uart port
2476 */
static int msm_hs_config_uart_gpios(struct uart_port *uport)
{
	struct platform_device *pdev = to_platform_device(uport->dev);
	const struct msm_serial_hs_platform_data *pdata =
				pdev->dev.platform_data;
	int ret = 0;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	/* Prefer pinctrl when the device tree provides it */
	if (!IS_ERR_OR_NULL(msm_uport->pinctrl)) {
		MSM_HS_DBG("%s(): Using Pinctrl", __func__);
		msm_uport->use_pinctrl = true;
		ret = pinctrl_select_state(msm_uport->pinctrl,
				msm_uport->gpio_state_active);
		if (ret)
			MSM_HS_ERR("%s(): Failed to pinctrl set_state",
				__func__);
		return ret;
	} else if (pdata) {
		/* Fall back to using gpio lib */
		if (gpio_is_valid(pdata->uart_tx_gpio)) {
			ret = gpio_request(pdata->uart_tx_gpio,
							"UART_TX_GPIO");
			if (unlikely(ret)) {
				MSM_HS_ERR("gpio request failed for:%d\n",
					pdata->uart_tx_gpio);
				goto exit_uart_config;
			}
		}

		if (gpio_is_valid(pdata->uart_rx_gpio)) {
			ret = gpio_request(pdata->uart_rx_gpio,
							"UART_RX_GPIO");
			if (unlikely(ret)) {
				MSM_HS_ERR("gpio request failed for:%d\n",
					pdata->uart_rx_gpio);
				goto uart_tx_unconfig;
			}
		}

		if (gpio_is_valid(pdata->uart_cts_gpio)) {
			ret = gpio_request(pdata->uart_cts_gpio,
							"UART_CTS_GPIO");
			if (unlikely(ret)) {
				MSM_HS_ERR("gpio request failed for:%d\n",
					pdata->uart_cts_gpio);
				goto uart_rx_unconfig;
			}
		}

		if (gpio_is_valid(pdata->uart_rfr_gpio)) {
			ret = gpio_request(pdata->uart_rfr_gpio,
							"UART_RFR_GPIO");
			if (unlikely(ret)) {
				MSM_HS_ERR("gpio request failed for:%d\n",
					pdata->uart_rfr_gpio);
				goto uart_cts_unconfig;
			}
		}
	} else {
		MSM_HS_ERR("Pdata is NULL.\n");
		ret = -EINVAL;
	}
	return ret;

	/* Unwind the GPIOs requested so far, in reverse order */
uart_cts_unconfig:
	if (gpio_is_valid(pdata->uart_cts_gpio))
		gpio_free(pdata->uart_cts_gpio);
uart_rx_unconfig:
	if (gpio_is_valid(pdata->uart_rx_gpio))
		gpio_free(pdata->uart_rx_gpio);
uart_tx_unconfig:
	if (gpio_is_valid(pdata->uart_tx_gpio))
		gpio_free(pdata->uart_tx_gpio);
exit_uart_config:
	return ret;
}
2553
2554
2555static void msm_hs_get_pinctrl_configs(struct uart_port *uport)
2556{
2557 struct pinctrl_state *set_state;
2558 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2559
2560 msm_uport->pinctrl = devm_pinctrl_get(uport->dev);
2561 if (IS_ERR_OR_NULL(msm_uport->pinctrl)) {
2562 MSM_HS_DBG("%s(): Pinctrl not defined", __func__);
2563 } else {
2564 MSM_HS_DBG("%s(): Using Pinctrl", __func__);
2565 msm_uport->use_pinctrl = true;
2566
2567 set_state = pinctrl_lookup_state(msm_uport->pinctrl,
2568 PINCTRL_STATE_DEFAULT);
2569 if (IS_ERR_OR_NULL(set_state)) {
2570 dev_err(uport->dev,
2571 "pinctrl lookup failed for default state");
2572 goto pinctrl_fail;
2573 }
2574
2575 MSM_HS_DBG("%s(): Pinctrl state active %p\n", __func__,
2576 set_state);
2577 msm_uport->gpio_state_active = set_state;
2578
2579 set_state = pinctrl_lookup_state(msm_uport->pinctrl,
2580 PINCTRL_STATE_SLEEP);
2581 if (IS_ERR_OR_NULL(set_state)) {
2582 dev_err(uport->dev,
2583 "pinctrl lookup failed for sleep state");
2584 goto pinctrl_fail;
2585 }
2586
2587 MSM_HS_DBG("%s(): Pinctrl state sleep %p\n", __func__,
2588 set_state);
2589 msm_uport->gpio_state_suspend = set_state;
2590 return;
2591 }
2592pinctrl_fail:
2593 msm_uport->pinctrl = NULL;
2594}
2595
2596/* Called when port is opened */
2597static int msm_hs_startup(struct uart_port *uport)
2598{
2599 int ret;
2600 int rfr_level;
2601 unsigned long flags;
2602 unsigned int data;
2603 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2604 struct circ_buf *tx_buf = &uport->state->xmit;
2605 struct msm_hs_tx *tx = &msm_uport->tx;
2606 struct msm_hs_rx *rx = &msm_uport->rx;
2607 struct sps_pipe *sps_pipe_handle_tx = tx->cons.pipe_handle;
2608 struct sps_pipe *sps_pipe_handle_rx = rx->prod.pipe_handle;
2609
2610 rfr_level = uport->fifosize;
2611 if (rfr_level > 16)
2612 rfr_level -= 16;
2613
2614 tx->dma_base = dma_map_single(uport->dev, tx_buf->buf, UART_XMIT_SIZE,
2615 DMA_TO_DEVICE);
2616
2617 /* turn on uart clk */
2618 msm_hs_resource_vote(msm_uport);
2619
Mukesh Kumar Savaliya8fa1c822018-03-27 00:00:35 +05302620 /* Set up Uart Receive */
2621 msm_hs_write(uport, UART_DM_RFWR, 32);
2622 /* Write to BADR explicitly to set up FIFO sizes */
2623 msm_hs_write(uport, UARTDM_BADR_ADDR, 64);
2624
2625 /* configure the CR Protection to Enable */
2626 msm_hs_write(uport, UART_DM_CR, CR_PROTECTION_EN);
2627
2628 /*
2629 * Enable Command register protection before going ahead as this hw
2630 * configuration makes sure that issued cmd to CR register gets complete
2631 * before next issued cmd start. Hence mb() requires here.
2632 */
2633 mb();
2634
2635 /*
2636 * Set RX_BREAK_ZERO_CHAR_OFF and RX_ERROR_CHAR_OFF
2637 * so any rx_break and character having parity of framing
2638 * error don't enter inside UART RX FIFO.
2639 */
2640 data = msm_hs_read(uport, UART_DM_MR2);
2641 data |= (UARTDM_MR2_RX_BREAK_ZERO_CHAR_OFF |
2642 UARTDM_MR2_RX_ERROR_CHAR_OFF);
2643 msm_hs_write(uport, UART_DM_MR2, data);
2644 /* Ensure register IO completion */
2645 mb();
2646
Mukesh Kumar Savaliya4305aad2017-09-03 02:09:07 +05302647 if (is_use_low_power_wakeup(msm_uport)) {
2648 ret = request_threaded_irq(msm_uport->wakeup.irq, NULL,
2649 msm_hs_wakeup_isr,
2650 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
2651 "msm_hs_wakeup", msm_uport);
2652 if (unlikely(ret)) {
2653 MSM_HS_ERR("%s():Err getting uart wakeup_irq %d\n",
2654 __func__, ret);
2655 goto unvote_exit;
2656 }
2657
2658 msm_uport->wakeup.freed = false;
2659 disable_irq(msm_uport->wakeup.irq);
2660 msm_uport->wakeup.enabled = false;
2661
2662 ret = irq_set_irq_wake(msm_uport->wakeup.irq, 1);
2663 if (unlikely(ret)) {
2664 MSM_HS_ERR("%s():Err setting wakeup irq\n", __func__);
2665 goto free_uart_irq;
2666 }
2667 }
2668
2669 ret = msm_hs_config_uart_gpios(uport);
2670 if (ret) {
2671 MSM_HS_ERR("Uart GPIO request failed\n");
2672 goto free_uart_irq;
2673 }
2674
2675 msm_hs_write(uport, UART_DM_DMEN, 0);
2676
2677 /* Connect TX */
2678 sps_tx_disconnect(msm_uport);
2679 ret = msm_hs_spsconnect_tx(msm_uport);
2680 if (ret) {
2681 MSM_HS_ERR("msm_serial_hs: SPS connect failed for TX");
2682 goto unconfig_uart_gpios;
2683 }
2684
2685 /* Connect RX */
2686 kthread_flush_worker(&msm_uport->rx.kworker);
2687 if (rx->flush != FLUSH_SHUTDOWN)
2688 disconnect_rx_endpoint(msm_uport);
2689 ret = msm_hs_spsconnect_rx(uport);
2690 if (ret) {
2691 MSM_HS_ERR("msm_serial_hs: SPS connect failed for RX");
2692 goto sps_disconnect_tx;
2693 }
2694
2695 data = (UARTDM_BCR_TX_BREAK_DISABLE | UARTDM_BCR_STALE_IRQ_EMPTY |
2696 UARTDM_BCR_RX_DMRX_LOW_EN | UARTDM_BCR_RX_STAL_IRQ_DMRX_EQL |
2697 UARTDM_BCR_RX_DMRX_1BYTE_RES_EN);
2698 msm_hs_write(uport, UART_DM_BCR, data);
2699
2700 /* Set auto RFR Level */
2701 data = msm_hs_read(uport, UART_DM_MR1);
2702 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK;
2703 data &= ~UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK;
2704 data |= (UARTDM_MR1_AUTO_RFR_LEVEL1_BMSK & (rfr_level << 2));
2705 data |= (UARTDM_MR1_AUTO_RFR_LEVEL0_BMSK & rfr_level);
2706 msm_hs_write(uport, UART_DM_MR1, data);
2707
2708 /* Make sure RXSTALE count is non-zero */
2709 data = msm_hs_read(uport, UART_DM_IPR);
2710 if (!data) {
2711 data |= 0x1f & UARTDM_IPR_STALE_LSB_BMSK;
2712 msm_hs_write(uport, UART_DM_IPR, data);
2713 }
2714
2715 /* Assume no flow control, unless termios sets it */
2716 msm_uport->flow_control = false;
2717 msm_hs_disable_flow_control(uport, true);
2718
2719
2720 /* Reset TX */
2721 msm_hs_write(uport, UART_DM_CR, RESET_TX);
2722 msm_hs_write(uport, UART_DM_CR, RESET_RX);
2723 msm_hs_write(uport, UART_DM_CR, RESET_ERROR_STATUS);
2724 msm_hs_write(uport, UART_DM_CR, RESET_BREAK_INT);
2725 msm_hs_write(uport, UART_DM_CR, RESET_STALE_INT);
2726 msm_hs_write(uport, UART_DM_CR, RESET_CTS);
2727 msm_hs_write(uport, UART_DM_CR, RFR_LOW);
2728 /* Turn on Uart Receiver */
2729 msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_EN_BMSK);
2730
2731 /* Turn on Uart Transmitter */
2732 msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_EN_BMSK);
2733
2734 tx->dma_in_flight = false;
2735 MSM_HS_DBG("%s():desc usage flag 0x%lx", __func__, rx->queued_flag);
2736 setup_timer(&(tx->tx_timeout_timer),
2737 tx_timeout_handler,
2738 (unsigned long) msm_uport);
2739
2740 /* Enable reading the current CTS, no harm even if CTS is ignored */
2741 msm_uport->imr_reg |= UARTDM_ISR_CURRENT_CTS_BMSK;
2742
2743 /* TXLEV on empty TX fifo */
2744 msm_hs_write(uport, UART_DM_TFWR, 4);
2745 /*
2746 * Complete all device write related configuration before
2747 * queuing RX request. Hence mb() requires here.
2748 */
2749 mb();
2750
2751 ret = request_irq(uport->irq, msm_hs_isr, IRQF_TRIGGER_HIGH,
2752 "msm_hs_uart", msm_uport);
2753 if (unlikely(ret)) {
2754 MSM_HS_ERR("%s():Error %d getting uart irq\n", __func__, ret);
2755 goto sps_disconnect_rx;
2756 }
2757
2758
2759 spin_lock_irqsave(&uport->lock, flags);
2760 atomic_set(&msm_uport->client_count, 0);
2761 atomic_set(&msm_uport->client_req_state, 0);
2762 LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
2763 "%s: Client_Count 0\n", __func__);
2764 msm_hs_start_rx_locked(uport);
2765
2766 spin_unlock_irqrestore(&uport->lock, flags);
2767
2768 msm_hs_resource_unvote(msm_uport);
2769 return 0;
2770
2771sps_disconnect_rx:
2772 sps_disconnect(sps_pipe_handle_rx);
2773sps_disconnect_tx:
2774 sps_disconnect(sps_pipe_handle_tx);
2775unconfig_uart_gpios:
2776 msm_hs_unconfig_uart_gpios(uport);
2777free_uart_irq:
2778 free_irq(uport->irq, msm_uport);
2779unvote_exit:
2780 msm_hs_resource_unvote(msm_uport);
2781 MSM_HS_ERR("%s(): Error return\n", __func__);
2782 return ret;
2783}
2784
2785/* Initialize tx and rx data structures */
2786static int uartdm_init_port(struct uart_port *uport)
2787{
2788 int ret = 0;
2789 struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
2790 struct msm_hs_tx *tx = &msm_uport->tx;
2791 struct msm_hs_rx *rx = &msm_uport->rx;
2792
2793 init_waitqueue_head(&rx->wait);
2794 init_waitqueue_head(&tx->wait);
2795 init_waitqueue_head(&msm_uport->bam_disconnect_wait);
2796
2797 /* Init kernel threads for tx and rx */
2798
2799 kthread_init_worker(&rx->kworker);
2800 rx->task = kthread_run(kthread_worker_fn,
2801 &rx->kworker, "msm_serial_hs_%d_rx_work", uport->line);
2802 if (IS_ERR(rx->task)) {
2803 MSM_HS_ERR("%s(): error creating task", __func__);
2804 goto exit_lh_init;
2805 }
2806 kthread_init_work(&rx->kwork, msm_serial_hs_rx_work);
2807
2808 kthread_init_worker(&tx->kworker);
2809 tx->task = kthread_run(kthread_worker_fn,
2810 &tx->kworker, "msm_serial_hs_%d_tx_work", uport->line);
2811 if (IS_ERR(rx->task)) {
2812 MSM_HS_ERR("%s(): error creating task", __func__);
2813 goto exit_lh_init;
2814 }
2815
2816 kthread_init_work(&tx->kwork, msm_serial_hs_tx_work);
2817
2818 rx->buffer = dma_alloc_coherent(uport->dev,
2819 UART_DMA_DESC_NR * UARTDM_RX_BUF_SIZE,
2820 &rx->rbuffer, GFP_KERNEL);
2821 if (!rx->buffer) {
2822 MSM_HS_ERR("%s(): cannot allocate rx->buffer", __func__);
2823 ret = -ENOMEM;
2824 goto exit_lh_init;
2825 }
2826
Mukesh Kumar Savaliya4305aad2017-09-03 02:09:07 +05302827
2828 INIT_DELAYED_WORK(&rx->flip_insert_work, flip_insert_work);
2829
2830 return ret;
2831exit_lh_init:
2832 kthread_stop(rx->task);
2833 rx->task = NULL;
2834 kthread_stop(tx->task);
2835 tx->task = NULL;
2836 return ret;
2837}
2838
/*
 * Build a msm_serial_hs_platform_data from the device-tree node: GPIO
 * numbers, wakeup/OBS flags and the BAM TX/RX end-point pipe indices.
 * Returns the devm-allocated pdata, or an ERR_PTR on a fatal parse error
 * (missing GPIOs are only logged, not fatal).
 */
struct msm_serial_hs_platform_data
	*msm_hs_dt_to_pdata(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct msm_serial_hs_platform_data *pdata;
	u32 rx_to_inject;
	int ret;

	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return ERR_PTR(-ENOMEM);

	/* Device id comes from the "uart" alias in the device tree */
	pdev->id = of_alias_get_id(pdev->dev.of_node, "uart");
	/* UART TX GPIO */
	pdata->uart_tx_gpio = of_get_named_gpio(node,
					"qcom,tx-gpio", 0);
	if (pdata->uart_tx_gpio < 0)
		pr_err("uart_tx_gpio is not available\n");

	/* UART RX GPIO */
	pdata->uart_rx_gpio = of_get_named_gpio(node,
					"qcom,rx-gpio", 0);
	if (pdata->uart_rx_gpio < 0)
		pr_err("uart_rx_gpio is not available\n");

	/* UART CTS GPIO */
	pdata->uart_cts_gpio = of_get_named_gpio(node,
					"qcom,cts-gpio", 0);
	if (pdata->uart_cts_gpio < 0)
		pr_err("uart_cts_gpio is not available\n");

	/* UART RFR GPIO */
	pdata->uart_rfr_gpio = of_get_named_gpio(node,
					"qcom,rfr-gpio", 0);
	if (pdata->uart_rfr_gpio < 0)
		pr_err("uart_rfr_gpio is not available\n");

	pdata->no_suspend_delay = of_property_read_bool(node,
				"qcom,no-suspend-delay");

	pdata->obs = of_property_read_bool(node,
				"qcom,msm-obs");
	if (pdata->obs)
		pr_err("%s:Out of Band sleep flag is set\n", __func__);

	pdata->inject_rx_on_wakeup = of_property_read_bool(node,
				"qcom,inject-rx-on-wakeup");

	/* The char to inject on wakeup is mandatory when injection is on */
	if (pdata->inject_rx_on_wakeup) {
		ret = of_property_read_u32(node, "qcom,rx-char-to-inject",
						&rx_to_inject);
		if (ret < 0) {
			pr_err("Error: Rx_char_to_inject not specified.\n");
			return ERR_PTR(ret);
		}
		pdata->rx_to_inject = (u8)rx_to_inject;
	}

	/* BAM pipe indices are mandatory and must lie in the valid range */
	ret = of_property_read_u32(node, "qcom,bam-tx-ep-pipe-index",
				&pdata->bam_tx_ep_pipe_index);
	if (ret < 0) {
		pr_err("Error: Getting UART BAM TX EP Pipe Index.\n");
		return ERR_PTR(ret);
	}

	if (!(pdata->bam_tx_ep_pipe_index >= BAM_PIPE_MIN &&
		pdata->bam_tx_ep_pipe_index <= BAM_PIPE_MAX)) {
		pr_err("Error: Invalid UART BAM TX EP Pipe Index.\n");
		return ERR_PTR(-EINVAL);
	}

	ret = of_property_read_u32(node, "qcom,bam-rx-ep-pipe-index",
					&pdata->bam_rx_ep_pipe_index);
	if (ret < 0) {
		pr_err("Error: Getting UART BAM RX EP Pipe Index.\n");
		return ERR_PTR(ret);
	}

	if (!(pdata->bam_rx_ep_pipe_index >= BAM_PIPE_MIN &&
		pdata->bam_rx_ep_pipe_index <= BAM_PIPE_MAX)) {
		pr_err("Error: Invalid UART BAM RX EP Pipe Index.\n");
		return ERR_PTR(-EINVAL);
	}

	pr_debug("tx_ep_pipe_index:%d rx_ep_pipe_index:%d\n"
		"tx_gpio:%d rx_gpio:%d rfr_gpio:%d cts_gpio:%d",
		pdata->bam_tx_ep_pipe_index, pdata->bam_rx_ep_pipe_index,
		pdata->uart_tx_gpio, pdata->uart_rx_gpio, pdata->uart_cts_gpio,
		pdata->uart_rfr_gpio);

	return pdata;
}
2931
2932
2933/**
2934 * Deallocate UART peripheral's SPS endpoint
2935 * @msm_uport - Pointer to msm_hs_port structure
2936 * @ep - Pointer to sps endpoint data structure
2937 */
2938
2939static void msm_hs_exit_ep_conn(struct msm_hs_port *msm_uport,
2940 struct msm_hs_sps_ep_conn_data *ep)
2941{
2942 struct sps_pipe *sps_pipe_handle = ep->pipe_handle;
2943 struct sps_connect *sps_config = &ep->config;
2944
2945 dma_free_coherent(msm_uport->uport.dev,
2946 sps_config->desc.size,
2947 &sps_config->desc.phys_base,
2948 GFP_KERNEL);
2949 sps_free_endpoint(sps_pipe_handle);
2950}
2951
2952
2953/**
2954 * Allocate UART peripheral's SPS endpoint
2955 *
2956 * This function allocates endpoint context
2957 * by calling appropriate SPS driver APIs.
2958 *
2959 * @msm_uport - Pointer to msm_hs_port structure
2960 * @ep - Pointer to sps endpoint data structure
2961 * @is_produce - 1 means Producer endpoint
2962 * - 0 means Consumer endpoint
2963 *
2964 * @return - 0 if successful else negative value
2965 */
2966
static int msm_hs_sps_init_ep_conn(struct msm_hs_port *msm_uport,
				struct msm_hs_sps_ep_conn_data *ep,
				bool is_producer)
{
	int rc = 0;
	struct sps_pipe *sps_pipe_handle;
	struct sps_connect *sps_config = &ep->config;
	struct sps_register_event *sps_event = &ep->event;

	/* Allocate endpoint context */
	sps_pipe_handle = sps_alloc_endpoint();
	if (!sps_pipe_handle) {
		MSM_HS_ERR("%s(): sps_alloc_endpoint() failed!!\n"
			"is_producer=%d", __func__, is_producer);
		rc = -ENOMEM;
		goto out;
	}

	/* Get default connection configuration for an endpoint */
	rc = sps_get_config(sps_pipe_handle, sps_config);
	if (rc) {
		MSM_HS_ERR("%s(): failed! pipe_handle=0x%p rc=%d",
			__func__, sps_pipe_handle, rc);
		goto get_config_err;
	}

	/* Modify the default connection configuration */
	if (is_producer) {
		/* For UART producer transfer, source is UART peripheral
		 * where as destination is system memory
		 */
		sps_config->source = msm_uport->bam_handle;
		sps_config->destination = SPS_DEV_HANDLE_MEM;
		sps_config->mode = SPS_MODE_SRC;
		sps_config->src_pipe_index = msm_uport->bam_rx_ep_pipe_index;
		sps_config->dest_pipe_index = 0;
		sps_event->callback = msm_hs_sps_rx_callback;
	} else {
		/* For UART consumer transfer, source is system memory
		 * where as destination is UART peripheral
		 */
		sps_config->source = SPS_DEV_HANDLE_MEM;
		sps_config->destination = msm_uport->bam_handle;
		sps_config->mode = SPS_MODE_DEST;
		sps_config->src_pipe_index = 0;
		sps_config->dest_pipe_index = msm_uport->bam_tx_ep_pipe_index;
		sps_event->callback = msm_hs_sps_tx_callback;
	}

	/* Interrupt on end-of-transfer and descriptor-done events */
	sps_config->options = SPS_O_EOT | SPS_O_DESC_DONE | SPS_O_AUTO_ENABLE;
	sps_config->event_thresh = 0x10;

	/* Allocate maximum descriptor fifo size */
	sps_config->desc.size =
		(1 + UART_DMA_DESC_NR) * sizeof(struct sps_iovec);
	sps_config->desc.base = dma_alloc_coherent(msm_uport->uport.dev,
						sps_config->desc.size,
						&sps_config->desc.phys_base,
						GFP_KERNEL);
	if (!sps_config->desc.base) {
		rc = -ENOMEM;
		MSM_HS_ERR("msm_serial_hs: dma_alloc_coherent() failed!!\n");
		goto get_config_err;
	}
	memset(sps_config->desc.base, 0x00, sps_config->desc.size);

	sps_event->mode = SPS_TRIGGER_CALLBACK;

	sps_event->options = SPS_O_DESC_DONE | SPS_O_EOT;
	sps_event->user = (void *)msm_uport;

	/* Now save the sps pipe handle */
	ep->pipe_handle = sps_pipe_handle;
	MSM_HS_DBG("msm_serial_hs: success !! %s: pipe_handle=0x%p\n"
		"desc_fifo.phys_base=0x%pa\n",
		is_producer ? "READ" : "WRITE",
		sps_pipe_handle, &sps_config->desc.phys_base);
	return 0;

get_config_err:
	sps_free_endpoint(sps_pipe_handle);
out:
	return rc;
}
3051
3052/**
3053 * Initialize SPS HW connected with UART core
3054 *
3055 * This function register BAM HW resources with
3056 * SPS driver and then initialize 2 SPS endpoints
3057 *
3058 * msm_uport - Pointer to msm_hs_port structure
3059 *
3060 * @return - 0 if successful else negative value
3061 */
3062
3063static int msm_hs_sps_init(struct msm_hs_port *msm_uport)
3064{
3065 int rc = 0;
3066 struct sps_bam_props bam = {0};
3067 unsigned long bam_handle;
3068
3069 rc = sps_phy2h(msm_uport->bam_mem, &bam_handle);
3070 if (rc || !bam_handle) {
3071 bam.phys_addr = msm_uport->bam_mem;
3072 bam.virt_addr = msm_uport->bam_base;
3073 /*
3074 * This event thresold value is only significant for BAM-to-BAM
3075 * transfer. It's ignored for BAM-to-System mode transfer.
3076 */
3077 bam.event_threshold = 0x10; /* Pipe event threshold */
3078 bam.summing_threshold = 1; /* BAM event threshold */
3079
3080 /* SPS driver wll handle the UART BAM IRQ */
3081 bam.irq = (u32)msm_uport->bam_irq;
3082 bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
3083
3084 MSM_HS_DBG("msm_serial_hs: bam physical base=0x%pa\n",
3085 &bam.phys_addr);
3086 MSM_HS_DBG("msm_serial_hs: bam virtual base=0x%p\n",
3087 bam.virt_addr);
3088
3089 /* Register UART Peripheral BAM device to SPS driver */
3090 rc = sps_register_bam_device(&bam, &bam_handle);
3091 if (rc) {
3092 MSM_HS_ERR("%s: BAM device register failed\n",
3093 __func__);
3094 return rc;
3095 }
3096 MSM_HS_DBG("%s:BAM device registered. bam_handle=0x%lx",
3097 __func__, msm_uport->bam_handle);
3098 }
3099 msm_uport->bam_handle = bam_handle;
3100
3101 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->rx.prod,
3102 UART_SPS_PROD_PERIPHERAL);
3103 if (rc) {
3104 MSM_HS_ERR("%s: Failed to Init Producer BAM-pipe", __func__);
3105 goto deregister_bam;
3106 }
3107
3108 rc = msm_hs_sps_init_ep_conn(msm_uport, &msm_uport->tx.cons,
3109 UART_SPS_CONS_PERIPHERAL);
3110 if (rc) {
3111 MSM_HS_ERR("%s: Failed to Init Consumer BAM-pipe", __func__);
3112 goto deinit_ep_conn_prod;
3113 }
3114 return 0;
3115
3116deinit_ep_conn_prod:
3117 msm_hs_exit_ep_conn(msm_uport, &msm_uport->rx.prod);
3118deregister_bam:
3119 sps_deregister_bam_device(msm_uport->bam_handle);
3120 return rc;
3121}
3122
3123
/* Tracks which of the UARTDM_NR device numbers are already claimed */
static bool deviceid[UARTDM_NR] = {0};
/*
 * The mutex synchronizes grabbing next free device number
 * both in case of an alias being used or not. When alias is
 * used, the msm_hs_dt_to_pdata gets it and the boolean array
 * is accordingly updated with device_id_set_used. If no alias
 * is used, then device_id_grab_next_free sets that array.
 */
static DEFINE_MUTEX(mutex_next_device_id);
3133
3134static int device_id_grab_next_free(void)
3135{
3136 int i;
3137 int ret = -ENODEV;
3138
3139 mutex_lock(&mutex_next_device_id);
3140 for (i = 0; i < UARTDM_NR; i++)
3141 if (!deviceid[i]) {
3142 ret = i;
3143 deviceid[i] = true;
3144 break;
3145 }
3146 mutex_unlock(&mutex_next_device_id);
3147 return ret;
3148}
3149
3150static int device_id_set_used(int index)
3151{
3152 int ret = 0;
3153
3154 mutex_lock(&mutex_next_device_id);
3155 if (deviceid[index])
3156 ret = -ENODEV;
3157 else
3158 deviceid[index] = true;
3159 mutex_unlock(&mutex_next_device_id);
3160 return ret;
3161}
3162
3163static void obs_manage_irq(struct msm_hs_port *msm_uport, bool en)
3164{
3165 struct uart_port *uport = &(msm_uport->uport);
3166
3167 if (msm_uport->obs) {
3168 if (en)
3169 enable_irq(uport->irq);
3170 else
3171 disable_irq(uport->irq);
3172 }
3173}
3174
/*
 * Common suspend path (called from the runtime-PM suspend hook).
 *
 * Switches port resources off, drops the clock/bus vote and, unless a
 * client explicitly requested clocks off, arms the RX wakeup interrupt
 * so incoming traffic can power the port back up.
 */
static void msm_hs_pm_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
	int ret;
	int client_count = 0;

	if (!msm_uport)
		goto err_suspend;
	mutex_lock(&msm_uport->mtx);

	client_count = atomic_read(&msm_uport->client_count);
	/* Flip state first so concurrent paths see the port as suspended */
	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
	msm_hs_resource_off(msm_uport);
	obs_manage_irq(msm_uport, false);
	msm_hs_clk_bus_unvote(msm_uport);

	/* For OBS, don't use wakeup interrupt, set gpio to suspended state */
	if (msm_uport->obs) {
		ret = pinctrl_select_state(msm_uport->pinctrl,
			msm_uport->gpio_state_suspend);
		if (ret)
			MSM_HS_ERR("%s():Error selecting pinctrl suspend state",
				__func__);
	}

	/* Arm RX wakeup only when no client is holding the clocks off */
	if (!atomic_read(&msm_uport->client_req_state))
		enable_wakeup_interrupt(msm_uport);
	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
		"%s: PM State Suspended client_count %d\n", __func__,
								client_count);
	mutex_unlock(&msm_uport->mtx);
	return;
err_suspend:
	pr_err("%s(): invalid uport", __func__);
}
3211
/*
 * Common resume path (called from the runtime-PM resume hook).
 *
 * Reverses msm_hs_pm_suspend(): disarms the RX wakeup interrupt
 * (unless a client drove the state), restores OBS gpio state, re-votes
 * clocks/bus bandwidth and switches the resources back on.
 * Returns 0 on success or the clock-vote error code.
 */
static int msm_hs_pm_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
	int ret = 0;
	int client_count = 0;

	if (!msm_uport) {
		dev_err(dev, "%s:Invalid uport\n", __func__);
		return -ENODEV;
	}

	mutex_lock(&msm_uport->mtx);
	client_count = atomic_read(&msm_uport->client_count);
	/* Nothing to do if the port is already powered */
	if (msm_uport->pm_state == MSM_HS_PM_ACTIVE)
		goto exit_pm_resume;
	if (!atomic_read(&msm_uport->client_req_state))
		disable_wakeup_interrupt(msm_uport);

	/* For OBS, don't use wakeup interrupt, set gpio to active state */
	if (msm_uport->obs) {
		ret = pinctrl_select_state(msm_uport->pinctrl,
			msm_uport->gpio_state_active);
		if (ret)
			MSM_HS_ERR("%s():Error selecting active state",
				 __func__);
	}

	ret = msm_hs_clk_bus_vote(msm_uport);
	if (ret) {
		MSM_HS_ERR("%s:Failed clock vote %d\n", __func__, ret);
		dev_err(dev, "%s:Failed clock vote %d\n", __func__, ret);
		goto exit_pm_resume;
	}
	obs_manage_irq(msm_uport, true);
	msm_uport->pm_state = MSM_HS_PM_ACTIVE;
	msm_hs_resource_on(msm_uport);

	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
		"%s:PM State:Active client_count %d\n", __func__, client_count);
exit_pm_resume:
	mutex_unlock(&msm_uport->mtx);
	return ret;
}
3256
3257#ifdef CONFIG_PM
3258static int msm_hs_pm_sys_suspend_noirq(struct device *dev)
3259{
3260 struct platform_device *pdev = to_platform_device(dev);
3261 struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);
3262 enum msm_hs_pm_state prev_pwr_state;
3263 int clk_cnt, client_count, ret = 0;
3264
3265 if (IS_ERR_OR_NULL(msm_uport))
3266 return -ENODEV;
3267
3268 mutex_lock(&msm_uport->mtx);
3269
3270 /*
3271 * If there is an active clk request or an impending userspace request
3272 * fail the suspend callback.
3273 */
3274 clk_cnt = atomic_read(&msm_uport->resource_count);
3275 client_count = atomic_read(&msm_uport->client_count);
3276 if (msm_uport->pm_state == MSM_HS_PM_ACTIVE) {
3277 MSM_HS_WARN("%s:Fail Suspend.clk_cnt:%d,clnt_count:%d\n",
3278 __func__, clk_cnt, client_count);
3279 ret = -EBUSY;
3280 goto exit_suspend_noirq;
3281 }
3282
3283 prev_pwr_state = msm_uport->pm_state;
3284 msm_uport->pm_state = MSM_HS_PM_SYS_SUSPENDED;
3285 LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
3286 "%s:PM State:Sys-Suspended client_count %d\n", __func__,
3287 client_count);
3288exit_suspend_noirq:
3289 mutex_unlock(&msm_uport->mtx);
3290 return ret;
3291};
3292
/*
 * System sleep noirq resume hook: drop back from sys-suspended to the
 * ordinary runtime-suspended state without powering anything on.
 */
static int msm_hs_pm_sys_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct msm_hs_port *msm_uport = get_matching_hs_port(pdev);

	if (IS_ERR_OR_NULL(msm_uport))
		return -ENODEV;
	/*
	 * Note system-pm resume and update the state
	 * variable. Resource activation will be done
	 * when transfer is requested.
	 */

	mutex_lock(&msm_uport->mtx);
	if (msm_uport->pm_state == MSM_HS_PM_SYS_SUSPENDED)
		msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
	LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
		"%s:PM State: Suspended\n", __func__);
	mutex_unlock(&msm_uport->mtx);
	return 0;
}
3314#endif
3315
3316#ifdef CONFIG_PM
/*
 * One-time runtime-PM setup for a port: start in the suspended state
 * with a 100 ms autosuspend delay, then enable runtime PM.
 */
static void msm_serial_hs_rt_init(struct uart_port *uport)
{
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);

	MSM_HS_INFO("%s(): Enabling runtime pm", __func__);
	pm_runtime_set_suspended(uport->dev);
	pm_runtime_set_autosuspend_delay(uport->dev, 100);
	pm_runtime_use_autosuspend(uport->dev);
	/* Keep the driver's own pm_state in sync with the PM core */
	mutex_lock(&msm_uport->mtx);
	msm_uport->pm_state = MSM_HS_PM_SUSPENDED;
	mutex_unlock(&msm_uport->mtx);
	pm_runtime_enable(uport->dev);
}
3330
/* Runtime-PM suspend hook; delegates to the common suspend path */
static int msm_hs_runtime_suspend(struct device *dev)
{
	msm_hs_pm_suspend(dev);
	return 0;
}
3336
/* Runtime-PM resume hook; delegates to the common resume path */
static int msm_hs_runtime_resume(struct device *dev)
{
	return msm_hs_pm_resume(dev);
}
3341#else
static void msm_serial_hs_rt_init(struct uart_port *uport) {}
/*
 * !CONFIG_PM stubs: these are non-void, so they must return a value —
 * the previous empty bodies were undefined behavior if ever called and
 * fail to build with -Werror=return-type. Report success.
 */
static int msm_hs_runtime_suspend(struct device *dev)
{
	return 0;
}
static int msm_hs_runtime_resume(struct device *dev)
{
	return 0;
}
3345#endif
3346
3347
3348static int msm_hs_probe(struct platform_device *pdev)
3349{
3350 int ret = 0;
3351 struct uart_port *uport;
3352 struct msm_hs_port *msm_uport;
3353 struct resource *core_resource;
3354 struct resource *bam_resource;
3355 int core_irqres, bam_irqres, wakeup_irqres;
3356 struct msm_serial_hs_platform_data *pdata = pdev->dev.platform_data;
Mukesh Kumar Savaliya4305aad2017-09-03 02:09:07 +05303357 char name[30];
3358
3359 if (pdev->dev.of_node) {
3360 dev_dbg(&pdev->dev, "device tree enabled\n");
3361 pdata = msm_hs_dt_to_pdata(pdev);
3362 if (IS_ERR(pdata))
3363 return PTR_ERR(pdata);
3364
3365 if (pdev->id < 0) {
3366 pdev->id = device_id_grab_next_free();
3367 if (pdev->id < 0) {
3368 dev_err(&pdev->dev,
3369 "Error grabbing next free device id");
3370 return pdev->id;
3371 }
3372 } else {
3373 ret = device_id_set_used(pdev->id);
3374 if (ret < 0) {
3375 dev_err(&pdev->dev, "%d alias taken",
3376 pdev->id);
3377 return ret;
3378 }
3379 }
3380 pdev->dev.platform_data = pdata;
3381 }
3382
3383 if (pdev->id < 0 || pdev->id >= UARTDM_NR) {
3384 dev_err(&pdev->dev, "Invalid plaform device ID = %d\n",
3385 pdev->id);
3386 return -EINVAL;
3387 }
3388
3389 msm_uport = devm_kzalloc(&pdev->dev, sizeof(struct msm_hs_port),
3390 GFP_KERNEL);
3391 if (!msm_uport)
3392 return -ENOMEM;
3393
3394 msm_uport->uport.type = PORT_UNKNOWN;
3395 uport = &msm_uport->uport;
3396 uport->dev = &pdev->dev;
3397
3398 if (pdev->dev.of_node)
3399 msm_uport->uart_type = BLSP_HSUART;
3400
3401 msm_hs_get_pinctrl_configs(uport);
3402 /* Get required resources for BAM HSUART */
3403 core_resource = platform_get_resource_byname(pdev,
3404 IORESOURCE_MEM, "core_mem");
3405 if (!core_resource) {
3406 dev_err(&pdev->dev, "Invalid core HSUART Resources.\n");
3407 return -ENXIO;
3408 }
3409 bam_resource = platform_get_resource_byname(pdev,
3410 IORESOURCE_MEM, "bam_mem");
3411 if (!bam_resource) {
3412 dev_err(&pdev->dev, "Invalid BAM HSUART Resources.\n");
3413 return -ENXIO;
3414 }
3415 core_irqres = platform_get_irq_byname(pdev, "core_irq");
3416 if (core_irqres < 0) {
3417 dev_err(&pdev->dev, "Error %d, invalid core irq resources.\n",
3418 core_irqres);
3419 return -ENXIO;
3420 }
3421 bam_irqres = platform_get_irq_byname(pdev, "bam_irq");
3422 if (bam_irqres < 0) {
3423 dev_err(&pdev->dev, "Error %d, invalid bam irq resources.\n",
3424 bam_irqres);
3425 return -ENXIO;
3426 }
3427 wakeup_irqres = platform_get_irq_byname(pdev, "wakeup_irq");
3428 if (wakeup_irqres < 0) {
3429 wakeup_irqres = -1;
3430 pr_info("Wakeup irq not specified.\n");
3431 }
3432
3433 uport->mapbase = core_resource->start;
3434
3435 uport->membase = ioremap(uport->mapbase,
3436 resource_size(core_resource));
3437 if (unlikely(!uport->membase)) {
3438 dev_err(&pdev->dev, "UART Resource ioremap Failed.\n");
3439 return -ENOMEM;
3440 }
3441 msm_uport->bam_mem = bam_resource->start;
3442 msm_uport->bam_base = ioremap(msm_uport->bam_mem,
3443 resource_size(bam_resource));
3444 if (unlikely(!msm_uport->bam_base)) {
3445 dev_err(&pdev->dev, "UART BAM Resource ioremap Failed.\n");
3446 iounmap(uport->membase);
3447 return -ENOMEM;
3448 }
3449
3450 memset(name, 0, sizeof(name));
3451 scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
3452 "_state");
3453 msm_uport->ipc_msm_hs_log_ctxt =
3454 ipc_log_context_create(IPC_MSM_HS_LOG_STATE_PAGES,
3455 name, 0);
3456 if (!msm_uport->ipc_msm_hs_log_ctxt) {
3457 dev_err(&pdev->dev, "%s: error creating logging context",
3458 __func__);
3459 } else {
3460 msm_uport->ipc_debug_mask = INFO_LEV;
3461 ret = sysfs_create_file(&pdev->dev.kobj,
3462 &dev_attr_debug_mask.attr);
3463 if (unlikely(ret))
3464 MSM_HS_WARN("%s: Failed to create dev. attr", __func__);
3465 }
3466
3467 uport->irq = core_irqres;
3468 msm_uport->bam_irq = bam_irqres;
3469 pdata->wakeup_irq = wakeup_irqres;
3470
3471 msm_uport->bus_scale_table = msm_bus_cl_get_pdata(pdev);
3472 if (!msm_uport->bus_scale_table) {
3473 MSM_HS_ERR("BLSP UART: Bus scaling is disabled.\n");
3474 } else {
3475 msm_uport->bus_perf_client =
3476 msm_bus_scale_register_client
3477 (msm_uport->bus_scale_table);
3478 if (IS_ERR(&msm_uport->bus_perf_client)) {
3479 MSM_HS_ERR("%s():Bus client register failed\n",
3480 __func__);
3481 ret = -EINVAL;
3482 goto unmap_memory;
3483 }
3484 }
3485
3486 msm_uport->wakeup.irq = pdata->wakeup_irq;
3487 msm_uport->wakeup.ignore = 1;
3488 msm_uport->wakeup.inject_rx = pdata->inject_rx_on_wakeup;
3489 msm_uport->wakeup.rx_to_inject = pdata->rx_to_inject;
3490 msm_uport->obs = pdata->obs;
3491
3492 msm_uport->bam_tx_ep_pipe_index =
3493 pdata->bam_tx_ep_pipe_index;
3494 msm_uport->bam_rx_ep_pipe_index =
3495 pdata->bam_rx_ep_pipe_index;
3496 msm_uport->wakeup.enabled = true;
3497
3498 uport->iotype = UPIO_MEM;
3499 uport->fifosize = 64;
3500 uport->ops = &msm_hs_ops;
3501 uport->flags = UPF_BOOT_AUTOCONF;
3502 uport->uartclk = 7372800;
3503 msm_uport->imr_reg = 0x0;
3504
3505 msm_uport->clk = clk_get(&pdev->dev, "core_clk");
3506 if (IS_ERR(msm_uport->clk)) {
3507 ret = PTR_ERR(msm_uport->clk);
3508 goto deregister_bus_client;
3509 }
3510
3511 msm_uport->pclk = clk_get(&pdev->dev, "iface_clk");
3512 /*
3513 * Some configurations do not require explicit pclk control so
3514 * do not flag error on pclk get failure.
3515 */
3516 if (IS_ERR(msm_uport->pclk))
3517 msm_uport->pclk = NULL;
3518
3519 msm_uport->hsuart_wq = alloc_workqueue("k_hsuart",
3520 WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
3521 if (!msm_uport->hsuart_wq) {
3522 MSM_HS_ERR("%s(): Unable to create workqueue hsuart_wq\n",
3523 __func__);
3524 ret = -ENOMEM;
3525 goto put_clk;
3526 }
3527
3528 mutex_init(&msm_uport->mtx);
3529
3530 /* Initialize SPS HW connected with UART core */
3531 ret = msm_hs_sps_init(msm_uport);
3532 if (unlikely(ret)) {
3533 MSM_HS_ERR("SPS Initialization failed ! err=%d", ret);
3534 goto destroy_mutex;
3535 }
3536
3537 msm_uport->tx.flush = FLUSH_SHUTDOWN;
3538 msm_uport->rx.flush = FLUSH_SHUTDOWN;
3539
3540 memset(name, 0, sizeof(name));
3541 scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
3542 "_tx");
3543 msm_uport->tx.ipc_tx_ctxt =
3544 ipc_log_context_create(IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
3545 if (!msm_uport->tx.ipc_tx_ctxt)
3546 dev_err(&pdev->dev, "%s: error creating tx logging context",
3547 __func__);
3548
3549 memset(name, 0, sizeof(name));
3550 scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
3551 "_rx");
3552 msm_uport->rx.ipc_rx_ctxt = ipc_log_context_create(
3553 IPC_MSM_HS_LOG_DATA_PAGES, name, 0);
3554 if (!msm_uport->rx.ipc_rx_ctxt)
3555 dev_err(&pdev->dev, "%s: error creating rx logging context",
3556 __func__);
3557
3558 memset(name, 0, sizeof(name));
3559 scnprintf(name, sizeof(name), "%s%s", dev_name(msm_uport->uport.dev),
3560 "_pwr");
3561 msm_uport->ipc_msm_hs_pwr_ctxt = ipc_log_context_create(
3562 IPC_MSM_HS_LOG_USER_PAGES, name, 0);
3563 if (!msm_uport->ipc_msm_hs_pwr_ctxt)
3564 dev_err(&pdev->dev, "%s: error creating usr logging context",
3565 __func__);
3566
3567 uport->irq = core_irqres;
3568 msm_uport->bam_irq = bam_irqres;
3569
3570 clk_set_rate(msm_uport->clk, msm_uport->uport.uartclk);
3571 msm_hs_clk_bus_vote(msm_uport);
3572 ret = uartdm_init_port(uport);
3573 if (unlikely(ret))
3574 goto err_clock;
3575
Mukesh Kumar Savaliya4305aad2017-09-03 02:09:07 +05303576
3577 ret = sysfs_create_file(&pdev->dev.kobj, &dev_attr_clock.attr);
3578 if (unlikely(ret)) {
3579 MSM_HS_ERR("Probe Failed as sysfs failed\n");
3580 goto err_clock;
3581 }
3582
3583 msm_serial_debugfs_init(msm_uport, pdev->id);
3584 msm_hs_unconfig_uart_gpios(uport);
3585
3586 uport->line = pdev->id;
3587 if (pdata->userid && pdata->userid <= UARTDM_NR)
3588 uport->line = pdata->userid;
3589 ret = uart_add_one_port(&msm_hs_driver, uport);
3590 if (!ret) {
3591 msm_hs_clk_bus_unvote(msm_uport);
3592 msm_serial_hs_rt_init(uport);
3593 return ret;
3594 }
3595
3596err_clock:
3597 msm_hs_clk_bus_unvote(msm_uport);
3598
3599destroy_mutex:
3600 mutex_destroy(&msm_uport->mtx);
3601 destroy_workqueue(msm_uport->hsuart_wq);
3602
3603put_clk:
3604 if (msm_uport->pclk)
3605 clk_put(msm_uport->pclk);
3606
3607 if (msm_uport->clk)
3608 clk_put(msm_uport->clk);
3609
3610deregister_bus_client:
3611 msm_bus_scale_unregister_client(msm_uport->bus_perf_client);
3612unmap_memory:
3613 iounmap(uport->membase);
3614 iounmap(msm_uport->bam_base);
3615
3616 return ret;
3617}
3618
/*
 * Module init: register the uart driver, create the debugfs root and
 * register the platform driver, unwinding both on platform failure.
 */
static int __init msm_serial_hs_init(void)
{
	int ret;

	ret = uart_register_driver(&msm_hs_driver);
	if (unlikely(ret)) {
		pr_err("%s failed to load\n", __func__);
		return ret;
	}
	/* debugfs is best-effort; loading continues without it */
	debug_base = debugfs_create_dir("msm_serial_hs", NULL);
	if (IS_ERR_OR_NULL(debug_base))
		pr_err("msm_serial_hs: Cannot create debugfs dir\n");

	ret = platform_driver_register(&msm_serial_hs_platform_driver);
	if (ret) {
		pr_err("%s failed to load\n", __func__);
		debugfs_remove_recursive(debug_base);
		uart_unregister_driver(&msm_hs_driver);
		return ret;
	}

	pr_info("msm_serial_hs module loaded\n");
	return ret;
}
3643
3644/*
3645 * Called by the upper layer when port is closed.
3646 * - Disables the port
3647 * - Unhook the ISR
3648 */
static void msm_hs_shutdown(struct uart_port *uport)
{
	int ret, rc;
	struct msm_hs_port *msm_uport = UARTDM_TO_MSM(uport);
	struct circ_buf *tx_buf = &uport->state->xmit;
	int data;
	unsigned long flags;

	/* Stop the wakeup irq from waking the system while closing */
	if (is_use_low_power_wakeup(msm_uport))
		irq_set_irq_wake(msm_uport->wakeup.irq, 0);

	/* Quiesce whichever irq is currently armed (wakeup vs core) */
	if (msm_uport->wakeup.enabled)
		disable_irq(msm_uport->wakeup.irq);
	else
		disable_irq(uport->irq);

	spin_lock_irqsave(&uport->lock, flags);
	msm_uport->wakeup.enabled = false;
	msm_uport->wakeup.ignore = 1;
	spin_unlock_irqrestore(&uport->lock, flags);

	/* Free the interrupt */
	free_irq(uport->irq, msm_uport);
	if (is_use_low_power_wakeup(msm_uport)) {
		free_irq(msm_uport->wakeup.irq, msm_uport);
		MSM_HS_DBG("%s(): wakeup irq freed", __func__);
	}
	msm_uport->wakeup.freed = true;

	/* make sure tx lh finishes */
	kthread_flush_worker(&msm_uport->tx.kworker);
	/* Timeout is in jiffies; ~500 ticks of grace for TX drain */
	ret = wait_event_timeout(msm_uport->tx.wait,
			uart_circ_empty(tx_buf), 500);
	if (!ret)
		MSM_HS_WARN("Shutdown called when tx buff not empty");

	msm_hs_resource_vote(msm_uport);
	/* Stop remote side from sending data */
	msm_hs_disable_flow_control(uport, false);
	/* make sure rx lh finishes */
	kthread_flush_worker(&msm_uport->rx.kworker);

	if (msm_uport->rx.flush != FLUSH_SHUTDOWN) {
		/* disable and disconnect rx */
		ret = wait_event_timeout(msm_uport->rx.wait,
				!msm_uport->rx.pending_flag, 500);
		if (!ret)
			MSM_HS_WARN("%s(): rx disconnect not complete",
				__func__);
		msm_hs_disconnect_rx(uport);
	}

	cancel_delayed_work_sync(&msm_uport->rx.flip_insert_work);
	flush_workqueue(msm_uport->hsuart_wq);

	/* BAM Disconnect for TX */
	data = msm_hs_read(uport, UART_DM_DMEN);
	data &= ~UARTDM_TX_BAM_ENABLE_BMSK;
	msm_hs_write(uport, UART_DM_DMEN, data);
	ret = sps_tx_disconnect(msm_uport);
	if (ret)
		MSM_HS_ERR("%s(): sps_disconnect failed\n",
					__func__);
	msm_uport->tx.flush = FLUSH_SHUTDOWN;
	/* Disable the transmitter */
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_TX_DISABLE_BMSK);
	/* Disable the receiver */
	msm_hs_write(uport, UART_DM_CR, UARTDM_CR_RX_DISABLE_BMSK);

	/* Mask off all UART interrupts */
	msm_uport->imr_reg = 0;
	msm_hs_write(uport, UART_DM_IMR, msm_uport->imr_reg);
	/*
	 * Complete all device write before actually disabling uartclk.
	 * Hence mb() requires here.
	 */
	mb();

	msm_uport->rx.buffer_pending = NONE_PENDING;
	MSM_HS_DBG("%s(): tx, rx events complete", __func__);

	dma_unmap_single(uport->dev, msm_uport->tx.dma_base,
			 UART_XMIT_SIZE, DMA_TO_DEVICE);

	msm_hs_resource_unvote(msm_uport);
	/* Drop any stray extra resource votes so the port can suspend */
	rc = atomic_read(&msm_uport->resource_count);
	if (rc) {
		atomic_set(&msm_uport->resource_count, 1);
		MSM_HS_WARN("%s(): removing extra vote\n", __func__);
		msm_hs_resource_unvote(msm_uport);
	}
	/* Reset any client-side vote imbalance left behind by userspace */
	if (atomic_read(&msm_uport->client_req_state)) {
		MSM_HS_WARN("%s: Client clock vote imbalance\n", __func__);
		atomic_set(&msm_uport->client_req_state, 0);
	}
	if (atomic_read(&msm_uport->client_count)) {
		MSM_HS_WARN("%s: Client vote on, forcing to 0\n", __func__);
		atomic_set(&msm_uport->client_count, 0);
		LOG_USR_MSG(msm_uport->ipc_msm_hs_pwr_ctxt,
			"%s: Client_Count 0\n", __func__);
	}
	msm_hs_unconfig_uart_gpios(uport);
	MSM_HS_INFO("%s:UART port closed successfully\n", __func__);
}
3752
/* Module exit: tear down in reverse order of msm_serial_hs_init() */
static void __exit msm_serial_hs_exit(void)
{
	pr_info("msm_serial_hs module removed\n");
	debugfs_remove_recursive(debug_base);
	platform_driver_unregister(&msm_serial_hs_platform_driver);
	uart_unregister_driver(&msm_hs_driver);
}
3760
/* PM callbacks: runtime PM hooks plus noirq system-sleep hooks */
static const struct dev_pm_ops msm_hs_dev_pm_ops = {
	.runtime_suspend = msm_hs_runtime_suspend,
	.runtime_resume = msm_hs_runtime_resume,
	.runtime_idle = NULL,
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(msm_hs_pm_sys_suspend_noirq,
				       msm_hs_pm_sys_resume_noirq)
};
3768
/* Platform driver glue; devices are matched via DT (msm_hs_match_table) */
static struct platform_driver msm_serial_hs_platform_driver = {
	.probe = msm_hs_probe,
	.remove = msm_hs_remove,
	.driver = {
		.name = "msm_serial_hs",
		.pm = &msm_hs_dev_pm_ops,
		.of_match_table = msm_hs_match_table,
	},
};
3778
/* Serial-core driver: up to UARTDM_NR ports exposed as /dev/ttyHS<n> */
static struct uart_driver msm_hs_driver = {
	.owner = THIS_MODULE,
	.driver_name = "msm_serial_hs",
	.dev_name = "ttyHS",
	.nr = UARTDM_NR,
	.cons = 0,	/* no console support on high-speed ports */
};
3786
/*
 * serial_core operations table. Entries suffixed "_locked" are called
 * by the serial core with uport->lock already held.
 */
static const struct uart_ops msm_hs_ops = {
	.tx_empty = msm_hs_tx_empty,
	.set_mctrl = msm_hs_set_mctrl_locked,
	.get_mctrl = msm_hs_get_mctrl_locked,
	.stop_tx = msm_hs_stop_tx_locked,
	.start_tx = msm_hs_start_tx_locked,
	.stop_rx = msm_hs_stop_rx_locked,
	.enable_ms = msm_hs_enable_ms_locked,
	.break_ctl = msm_hs_break_ctl,
	.startup = msm_hs_startup,
	.shutdown = msm_hs_shutdown,
	.set_termios = msm_hs_set_termios,
	.type = msm_hs_type,
	.config_port = msm_hs_config_port,
	.flush_buffer = NULL,	/* BAM pipes are drained in shutdown */
	.ioctl = msm_hs_ioctl,
};
3804
3805module_init(msm_serial_hs_init);
3806module_exit(msm_serial_hs_exit);
3807MODULE_DESCRIPTION("High Speed UART Driver for the MSM chipset");
3808MODULE_LICENSE("GPL v2");