/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
12
13#include <asm/dma-iommu.h>
14#include <linux/atomic.h>
15#include <linux/completion.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/io.h>
21#include <linux/iommu.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/ipc_logging.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_address.h>
29#include <linux/of_dma.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/scatterlist.h>
33#include <linux/sched_clock.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <asm/cacheflush.h>
37#include <linux/msm_gpi.h>
38#include "../dmaengine.h"
39#include "../virt-dma.h"
40#include "msm_gpi_mmio.h"
41
42/* global logging macros */
43#define GPI_LOG(gpi_dev, fmt, ...) do { \
44 if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
45 dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
46 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
47 ipc_log_string(gpi_dev->ilctxt, \
48 "%s: " fmt, __func__, ##__VA_ARGS__); \
49 } while (0)
50#define GPI_ERR(gpi_dev, fmt, ...) do { \
51 if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
52 dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
53 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
54 ipc_log_string(gpi_dev->ilctxt, \
55 "%s: " fmt, __func__, ##__VA_ARGS__); \
56 } while (0)
57
58/* gpii specific logging macros */
#define GPII_INFO(gpii, ch, fmt, ...) do { \
60 if (gpii->klog_lvl >= LOG_LVL_INFO) \
61 pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
62 __func__, ##__VA_ARGS__); \
63 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
64 ipc_log_string(gpii->ilctxt, \
65 "ch:%u %s: " fmt, ch, \
66 __func__, ##__VA_ARGS__); \
67 } while (0)
68#define GPII_ERR(gpii, ch, fmt, ...) do { \
69 if (gpii->klog_lvl >= LOG_LVL_ERROR) \
70 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
71 __func__, ##__VA_ARGS__); \
72 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
73 ipc_log_string(gpii->ilctxt, \
74 "ch:%u %s: " fmt, ch, \
75 __func__, ##__VA_ARGS__); \
76 } while (0)
77#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
78 if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
79 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
80 __func__, ##__VA_ARGS__); \
81 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
82 ipc_log_string(gpii->ilctxt, \
83 "ch:%u %s: " fmt, ch, \
84 __func__, ##__VA_ARGS__); \
85 } while (0)
86
87enum DEBUG_LOG_LVL {
88 LOG_LVL_MASK_ALL,
89 LOG_LVL_CRITICAL,
90 LOG_LVL_ERROR,
91 LOG_LVL_INFO,
92 LOG_LVL_VERBOSE,
93 LOG_LVL_REG_ACCESS,
94};
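/*
 * Note on the levels above: they are ordered from least to most verbose and
 * the logging macros compare with ">=", so e.g. klog_lvl == LOG_LVL_INFO
 * enables the CRITICAL, ERROR and INFO prints but not VERBOSE or REG_ACCESS.
 * LOG_LVL_MASK_ALL (0) is used by GPI_LOG to mean "mask everything".
 */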
95
96enum EV_PRIORITY {
97 EV_PRIORITY_ISR,
98 EV_PRIORITY_TASKLET,
99};
100
101#define GPI_DMA_DRV_NAME "gpi_dma"
102#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
103#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
104#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
105#define IPC_LOG_PAGES (40)
106#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
#define CMD_TIMEOUT_MS (1000)
#define GPII_REG(gpii, ch, fmt, ...) do { \
109 if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
110 pr_info("%s:%u:%s: " fmt, gpii->label, \
111 ch, __func__, ##__VA_ARGS__); \
112 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
113 ipc_log_string(gpii->ilctxt, \
114 "ch:%u %s: " fmt, ch, \
115 __func__, ##__VA_ARGS__); \
116 } while (0)
117#define GPII_VERB(gpii, ch, fmt, ...) do { \
118 if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
119 pr_info("%s:%u:%s: " fmt, gpii->label, \
120 ch, __func__, ##__VA_ARGS__); \
121 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
122 ipc_log_string(gpii->ilctxt, \
123 "ch:%u %s: " fmt, ch, \
124 __func__, ##__VA_ARGS__); \
125 } while (0)
126
#else
#define IPC_LOG_PAGES (2)
#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
#define CMD_TIMEOUT_MS (250)
/* verbose and register logging are disabled if !debug */
#define GPII_REG(gpii, ch, fmt, ...)
#define GPII_VERB(gpii, ch, fmt, ...)
#endif
136
137#define GPI_LABEL_SIZE (256)
138#define GPI_DBG_COMMON (99)
139#define MAX_CHANNELS_PER_GPII (2)
#define GPI_TX_CHAN (0)
#define GPI_RX_CHAN (1)
#define STATE_IGNORE (U32_MAX)
#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */

145struct __packed gpi_error_log_entry {
146 u32 routine : 4;
147 u32 type : 4;
148 u32 reserved0 : 4;
149 u32 code : 4;
150 u32 reserved1 : 3;
151 u32 chid : 5;
152 u32 reserved2 : 1;
153 u32 chtype : 1;
154 u32 ee : 1;
155};
156
157struct __packed xfer_compl_event {
158 u64 ptr;
159 u32 length : 24;
160 u8 code;
161 u16 status;
162 u8 type;
163 u8 chid;
164};
165
166struct __packed immediate_data_event {
167 u8 data_bytes[8];
168 u8 length : 4;
169 u8 resvd : 4;
170 u16 tre_index;
171 u8 code;
172 u16 status;
173 u8 type;
174 u8 chid;
175};
176
177struct __packed qup_notif_event {
178 u32 status;
179 u32 time;
180 u32 count :24;
181 u8 resvd;
182 u16 resvd1;
183 u8 type;
184 u8 chid;
185};
186
187struct __packed gpi_ere {
188 u32 dword[4];
189};
190
191enum GPI_EV_TYPE {
192 XFER_COMPLETE_EV_TYPE = 0x22,
193 IMMEDIATE_DATA_EV_TYPE = 0x30,
194 QUP_NOTIF_EV_TYPE = 0x31,
195 STALE_EV_TYPE = 0xFF,
196};
197
198union __packed gpi_event {
199 struct __packed xfer_compl_event xfer_compl_event;
200 struct __packed immediate_data_event immediate_data_event;
201 struct __packed qup_notif_event qup_notif_event;
202 struct __packed gpi_ere gpi_ere;
203};
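/*
 * All event formats above end with the same type/chid bytes, so the event
 * dispatcher (gpi_process_events below) can peek at xfer_compl_event.type
 * and .chid before it knows the concrete event format, then switch on the
 * type (XFER_COMPLETE_EV_TYPE, IMMEDIATE_DATA_EV_TYPE, QUP_NOTIF_EV_TYPE).
 */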
204
205enum gpii_irq_settings {
206 DEFAULT_IRQ_SETTINGS,
207 MASK_IEOB_SETTINGS,
208};
209
210enum gpi_ev_state {
211 DEFAULT_EV_CH_STATE = 0,
212 EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
213 EV_STATE_ALLOCATED,
214 MAX_EV_STATES
215};
216
217static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
218 [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
219 [EV_STATE_ALLOCATED] = "ALLOCATED",
220};
221
222#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
223 "INVALID" : gpi_ev_state_str[state])
224
225enum gpi_ch_state {
226 DEFAULT_CH_STATE = 0x0,
227 CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
228 CH_STATE_ALLOCATED = 0x1,
229 CH_STATE_STARTED = 0x2,
230 CH_STATE_STOPPED = 0x3,
231 CH_STATE_STOP_IN_PROC = 0x4,
232 CH_STATE_ERROR = 0xf,
233 MAX_CH_STATES
234};
235
236static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
237 [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
238 [CH_STATE_ALLOCATED] = "ALLOCATED",
239 [CH_STATE_STARTED] = "STARTED",
240 [CH_STATE_STOPPED] = "STOPPED",
241 [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
242 [CH_STATE_ERROR] = "ERROR",
243};
244
245#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
246 "INVALID" : gpi_ch_state_str[state])
247
248enum gpi_cmd {
249 GPI_CH_CMD_BEGIN,
250 GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
251 GPI_CH_CMD_START,
252 GPI_CH_CMD_STOP,
253 GPI_CH_CMD_RESET,
254 GPI_CH_CMD_DE_ALLOC,
255 GPI_CH_CMD_UART_SW_STALE,
256 GPI_CH_CMD_UART_RFR_READY,
257 GPI_CH_CMD_UART_RFR_NOT_READY,
258 GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
259 GPI_EV_CMD_BEGIN,
260 GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
261 GPI_EV_CMD_RESET,
262 GPI_EV_CMD_DEALLOC,
263 GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
264 GPI_MAX_CMD,
265};
266
267#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
268
269static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
270 [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
271 [GPI_CH_CMD_START] = "CH START",
272 [GPI_CH_CMD_STOP] = "CH STOP",
273 [GPI_CH_CMD_RESET] = "CH_RESET",
274 [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
275 [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
276 [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
277 [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
278 [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
279 [GPI_EV_CMD_RESET] = "EV RESET",
280 [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
281};
282
283#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
284 gpi_cmd_str[cmd])
285
286static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
287 [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
288 [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
289 [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
290 [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
291 [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
292 [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
293};
294
295#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
296 "INVALID" : gpi_cb_event_str[event])
297
298enum se_protocol {
299 SE_PROTOCOL_SPI = 1,
300 SE_PROTOCOL_UART = 2,
301 SE_PROTOCOL_I2C = 3,
302 SE_MAX_PROTOCOL
303};
304
/*
 * @DISABLE_STATE: no register access allowed
 * @CONFIG_STATE: client has configured the channel
 * @PREPARE_HARDWARE: register access is allowed
 *		      however, no processing EVENTS
 * @ACTIVE_STATE: channels are fully operational
 * @PREPARE_TERMINATE: graceful termination of channels
 *		       register access is allowed
 * @PAUSE_STATE: channels are active, but not processing any events
 */
315enum gpi_pm_state {
316 DISABLE_STATE,
317 CONFIG_STATE,
318 PREPARE_HARDWARE,
319 ACTIVE_STATE,
320 PREPARE_TERMINATE,
321 PAUSE_STATE,
322 MAX_PM_STATE
323};
324
325#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
326
327static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
328 [DISABLE_STATE] = "DISABLE",
329 [CONFIG_STATE] = "CONFIG",
330 [PREPARE_HARDWARE] = "PREPARE HARDWARE",
331 [ACTIVE_STATE] = "ACTIVE",
332 [PREPARE_TERMINATE] = "PREPARE TERMINATE",
333 [PAUSE_STATE] = "PAUSE",
334};
335
336#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
337 "INVALID" : gpi_pm_state_str[state])
338
339static const struct {
340 enum gpi_cmd gpi_cmd;
341 u32 opcode;
342 u32 state;
343 u32 timeout_ms;
344} gpi_cmd_info[GPI_MAX_CMD] = {
345 {
346 GPI_CH_CMD_ALLOCATE,
347 GPI_GPII_n_CH_CMD_ALLOCATE,
348 CH_STATE_ALLOCATED,
349 CMD_TIMEOUT_MS,
350 },
351 {
352 GPI_CH_CMD_START,
353 GPI_GPII_n_CH_CMD_START,
354 CH_STATE_STARTED,
355 CMD_TIMEOUT_MS,
356 },
357 {
358 GPI_CH_CMD_STOP,
359 GPI_GPII_n_CH_CMD_STOP,
360 CH_STATE_STOPPED,
361 CMD_TIMEOUT_MS,
362 },
363 {
364 GPI_CH_CMD_RESET,
365 GPI_GPII_n_CH_CMD_RESET,
366 CH_STATE_ALLOCATED,
367 CMD_TIMEOUT_MS,
368 },
369 {
370 GPI_CH_CMD_DE_ALLOC,
371 GPI_GPII_n_CH_CMD_DE_ALLOC,
372 CH_STATE_NOT_ALLOCATED,
373 CMD_TIMEOUT_MS,
374 },
375 {
376 GPI_CH_CMD_UART_SW_STALE,
377 GPI_GPII_n_CH_CMD_UART_SW_STALE,
378 STATE_IGNORE,
379 CMD_TIMEOUT_MS,
380 },
381 {
382 GPI_CH_CMD_UART_RFR_READY,
383 GPI_GPII_n_CH_CMD_UART_RFR_READY,
384 STATE_IGNORE,
385 CMD_TIMEOUT_MS,
386 },
387 {
388 GPI_CH_CMD_UART_RFR_NOT_READY,
389 GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
390 STATE_IGNORE,
391 CMD_TIMEOUT_MS,
392 },
393 {
394 GPI_EV_CMD_ALLOCATE,
395 GPI_GPII_n_EV_CH_CMD_ALLOCATE,
396 EV_STATE_ALLOCATED,
397 CMD_TIMEOUT_MS,
398 },
399 {
400 GPI_EV_CMD_RESET,
401 GPI_GPII_n_EV_CH_CMD_RESET,
402 EV_STATE_ALLOCATED,
403 CMD_TIMEOUT_MS,
404 },
405 {
406 GPI_EV_CMD_DEALLOC,
407 GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
408 EV_STATE_NOT_ALLOCATED,
409 CMD_TIMEOUT_MS,
410 },
411};
412
413struct gpi_ring {
414 void *pre_aligned;
415 size_t alloc_size;
416 phys_addr_t phys_addr;
417 dma_addr_t dma_handle;
418 void *base;
419 void *wp;
420 void *rp;
421 u32 len;
422 u32 el_size;
423 u32 elements;
424 bool configured;
425};
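/*
 * A gpi_ring is one physically contiguous, length-aligned circular buffer
 * (see gpi_alloc_ring below): pre_aligned/dma_handle describe the raw
 * coherent allocation, base/phys_addr the aligned region actually used.
 * rp/wp are the driver's local read/write pointers; the hardware's view of
 * the ring is programmed through the CNTXT registers and the doorbells.
 */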
426
427struct sg_tre {
428 void *ptr;
429 void *wp; /* store chan wp for debugging */
430};
431
432struct gpi_dbg_log {
433 void *addr;
434 u64 time;
435 u32 val;
436 bool read;
437};
438
439struct gpi_dev {
440 struct dma_device dma_device;
441 struct device *dev;
442 struct resource *res;
443 void __iomem *regs;
444 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
445 u32 gpii_mask; /* gpii instances available for apps */
446 u32 ev_factor; /* ev ring length factor */
	u32 smmu_cfg;
	dma_addr_t iova_base;
	size_t iova_size;
	struct gpii *gpiis;
451 void *ilctxt;
452 u32 ipc_log_lvl;
453 u32 klog_lvl;
454 struct dentry *dentry;
455};
456
457struct gpii_chan {
458 struct virt_dma_chan vc;
459 u32 chid;
460 u32 seid;
461 enum se_protocol protocol;
462 enum EV_PRIORITY priority; /* comes from clients DT node */
463 struct gpii *gpii;
464 enum gpi_ch_state ch_state;
465 enum gpi_pm_state pm_state;
466 void __iomem *ch_cntxt_base_reg;
467 void __iomem *ch_cntxt_db_reg;
468 void __iomem *ch_ring_base_lsb_reg,
469 *ch_ring_rp_lsb_reg,
470 *ch_ring_wp_lsb_reg;
471 void __iomem *ch_cmd_reg;
472 u32 req_tres; /* # of tre's client requested */
473 u32 dir;
474 struct gpi_ring ch_ring;
	struct gpi_client_info client_info;
476};
477
478struct gpii {
479 u32 gpii_id;
480 struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
481 struct gpi_dev *gpi_dev;
482 enum EV_PRIORITY ev_priority;
483 enum se_protocol protocol;
484 int irq;
485 void __iomem *regs; /* points to gpi top */
486 void __iomem *ev_cntxt_base_reg;
487 void __iomem *ev_cntxt_db_reg;
488 void __iomem *ev_ring_base_lsb_reg,
489 *ev_ring_rp_lsb_reg,
490 *ev_ring_wp_lsb_reg;
491 void __iomem *ev_cmd_reg;
492 void __iomem *ieob_src_reg;
493 void __iomem *ieob_clr_reg;
494 struct mutex ctrl_lock;
495 enum gpi_ev_state ev_state;
496 bool configured_irq;
497 enum gpi_pm_state pm_state;
498 rwlock_t pm_lock;
499 struct gpi_ring ev_ring;
500 struct tasklet_struct ev_task; /* event processing tasklet */
501 struct completion cmd_completion;
502 enum gpi_cmd gpi_cmd;
503 u32 cntxt_type_irq_msk;
504 void *ilctxt;
505 u32 ipc_log_lvl;
506 u32 klog_lvl;
507 struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
508 atomic_t dbg_index;
509 char label[GPI_LABEL_SIZE];
510 struct dentry *dentry;
511};
512
513struct gpi_desc {
514 struct virt_dma_desc vd;
515 void *wp; /* points to TRE last queued during issue_pending */
	void *db; /* DB register to program */
517 struct gpii_chan *gpii_chan;
518};
519
#define GPI_SMMU_ATTACH BIT(0)
#define GPI_SMMU_S1_BYPASS BIT(1)
#define GPI_SMMU_FAST BIT(2)
#define GPI_SMMU_ATOMIC BIT(3)

const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
	GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
};
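/*
 * Channel index to direction mapping: GPI_TX_CHAN (0) is the outbound
 * (GPI_CHTYPE_DIR_OUT) channel and GPI_RX_CHAN (1) the inbound
 * (GPI_CHTYPE_DIR_IN) one, matching the gpii_chan[] layout per gpii.
 */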
528
529struct dentry *pdentry;
530static irqreturn_t gpi_handle_irq(int irq, void *data);
531static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
532static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
533static void gpi_process_events(struct gpii *gpii);
534
535static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
536{
537 return container_of(dma_chan, struct gpii_chan, vc.chan);
538}
539
540static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
541{
542 return container_of(vd, struct gpi_desc, vd);
543}
544
545static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
546 void *addr)
547{
548 return ring->phys_addr + (addr - ring->base);
549}
550
551static inline void *to_virtual(const struct gpi_ring *const ring,
552 phys_addr_t addr)
553{
554 return ring->base + (addr - ring->phys_addr);
555}
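/*
 * Example of the two helpers above: for a ring with base == 0xffff0000 and
 * phys_addr == 0x80000000, a TRE at virtual address 0xffff0040 translates
 * to physical 0x80000040 and back. Both assume addr lies inside the ring.
 */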
556
#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
{
	u64 time = sched_clock();
	unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
	unsigned long offset = addr - gpii->regs;
	u32 val;

	val = readl_relaxed(addr);
	index &= (GPI_DBG_LOG_SIZE - 1);
	(gpii->dbg_log + index)->addr = addr;
	(gpii->dbg_log + index)->time = time;
	(gpii->dbg_log + index)->val = val;
	(gpii->dbg_log + index)->read = true;
	GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
		 offset, val);
	return val;
}
static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
{
	u64 time = sched_clock();
	unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
	unsigned long offset = addr - gpii->regs;

	index &= (GPI_DBG_LOG_SIZE - 1);
	(gpii->dbg_log + index)->addr = addr;
	(gpii->dbg_log + index)->time = time;
	(gpii->dbg_log + index)->val = val;
	(gpii->dbg_log + index)->read = false;

	GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
		 offset, val);
	writel_relaxed(val, addr);
}
#else
static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
{
	u32 val = readl_relaxed(addr);

	GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
		 addr - gpii->regs, val);
	return val;
}
static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
{
	GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
		 addr - gpii->regs, val);
	writel_relaxed(val, addr);
}
#endif
607
608/* gpi_write_reg_field - write to specific bit field */
609static inline void gpi_write_reg_field(struct gpii *gpii,
610 void __iomem *addr,
611 u32 mask,
612 u32 shift,
613 u32 val)
614{
615 u32 tmp = gpi_read_reg(gpii, addr);
616
617 tmp &= ~mask;
618 val = tmp | ((val << shift) & mask);
619 gpi_write_reg(gpii, addr, val);
620}
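/*
 * Read-modify-write example for the helper above: with mask == 0x00000070
 * and shift == 4, writing val == 0x5 clears bits [6:4] of the register and
 * sets them to 101b, leaving every other bit of the register untouched.
 */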
621
622static void gpi_disable_interrupts(struct gpii *gpii)
623{
624 struct {
625 u32 offset;
626 u32 mask;
627 u32 shift;
628 u32 val;
629 } default_reg[] = {
630 {
631 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
632 (gpii->gpii_id),
633 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
634 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
635 0,
636 },
637 {
638 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
639 (gpii->gpii_id),
640 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
641 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
642 0,
643 },
644 {
645 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
646 (gpii->gpii_id),
647 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
648 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
649 0,
650 },
651 {
652 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
653 (gpii->gpii_id),
654 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
655 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
656 0,
657 },
658 {
659 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
660 (gpii->gpii_id),
661 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
662 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
663 0,
664 },
665 {
666 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
667 (gpii->gpii_id),
668 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
669 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
670 0,
671 },
672 {
673 GPI_GPII_n_CNTXT_INTSET_OFFS
674 (gpii->gpii_id),
675 GPI_GPII_n_CNTXT_INTSET_BMSK,
676 GPI_GPII_n_CNTXT_INTSET_SHFT,
677 0,
678 },
679 { 0 },
680 };
681 int i;
682
683 for (i = 0; default_reg[i].offset; i++)
684 gpi_write_reg_field(gpii, gpii->regs +
685 default_reg[i].offset,
686 default_reg[i].mask,
687 default_reg[i].shift,
688 default_reg[i].val);
689 gpii->cntxt_type_irq_msk = 0;
690 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
691 gpii->configured_irq = false;
692}
693
694/* configure and enable interrupts */
695static int gpi_config_interrupts(struct gpii *gpii,
696 enum gpii_irq_settings settings,
697 bool mask)
698{
699 int ret;
700 int i;
701 const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
702 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
703 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
704 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
705 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
706 struct {
707 u32 offset;
708 u32 mask;
709 u32 shift;
710 u32 val;
711 } default_reg[] = {
712 {
713 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
714 (gpii->gpii_id),
715 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
716 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
717 def_type,
718 },
719 {
720 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
721 (gpii->gpii_id),
722 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
723 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
724 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
725 },
726 {
727 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
728 (gpii->gpii_id),
729 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
730 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
731 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
732 },
733 {
734 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
735 (gpii->gpii_id),
736 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
737 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
738 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
739 },
740 {
741 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
742 (gpii->gpii_id),
743 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
744 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
745 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
746 },
747 {
748 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
749 (gpii->gpii_id),
750 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
751 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
752 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
753 },
754 {
755 GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
756 (gpii->gpii_id),
757 U32_MAX,
758 0,
759 0x0,
760 },
761 {
762 GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
763 (gpii->gpii_id),
764 U32_MAX,
765 0,
766 0x0,
767 },
768 {
769 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
770 (gpii->gpii_id),
771 U32_MAX,
772 0,
773 0x0,
774 },
775 {
776 GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
777 (gpii->gpii_id),
778 U32_MAX,
779 0,
780 0x0,
781 },
782 {
783 GPI_GPII_n_CNTXT_INTSET_OFFS
784 (gpii->gpii_id),
785 GPI_GPII_n_CNTXT_INTSET_BMSK,
786 GPI_GPII_n_CNTXT_INTSET_SHFT,
787 0x01,
788 },
789 {
790 GPI_GPII_n_ERROR_LOG_OFFS
791 (gpii->gpii_id),
792 U32_MAX,
793 0,
794 0x00,
795 },
796 { 0 },
797 };
798
799 GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
800 (gpii->configured_irq) ? 'F' : 'T',
801 (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
802 (mask) ? 'T' : 'F');
803
804 if (gpii->configured_irq == false) {
805 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
806 gpi_handle_irq, IRQF_TRIGGER_HIGH,
807 gpii->label, gpii);
808 if (ret < 0) {
809 GPII_CRITIC(gpii, GPI_DBG_COMMON,
810 "error request irq:%d ret:%d\n",
811 gpii->irq, ret);
812 return ret;
813 }
814 }
815
816 if (settings == MASK_IEOB_SETTINGS) {
817 /*
818 * GPII only uses one EV ring per gpii so we can globally
819 * enable/disable IEOB interrupt
820 */
821 if (mask)
822 gpii->cntxt_type_irq_msk |=
823 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
824 else
825 gpii->cntxt_type_irq_msk &=
826 ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
827 gpi_write_reg_field(gpii, gpii->regs +
828 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
829 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
830 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
831 gpii->cntxt_type_irq_msk);
832 } else {
833 for (i = 0; default_reg[i].offset; i++)
834 gpi_write_reg_field(gpii, gpii->regs +
835 default_reg[i].offset,
836 default_reg[i].mask,
837 default_reg[i].shift,
838 default_reg[i].val);
839 gpii->cntxt_type_irq_msk = def_type;
	}
841
842 gpii->configured_irq = true;
843
844 return 0;
845}
846
847/* Sends gpii event or channel command */
848static int gpi_send_cmd(struct gpii *gpii,
849 struct gpii_chan *gpii_chan,
850 enum gpi_cmd gpi_cmd)
851{
852 u32 chid = MAX_CHANNELS_PER_GPII;
853 u32 cmd;
854 unsigned long timeout;
855 void __iomem *cmd_reg;
856
857 if (gpi_cmd >= GPI_MAX_CMD)
858 return -EINVAL;
859 if (IS_CHAN_CMD(gpi_cmd))
860 chid = gpii_chan->chid;
861
862 GPII_INFO(gpii, chid,
863 "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
864
865 /* send opcode and wait for completion */
866 reinit_completion(&gpii->cmd_completion);
867 gpii->gpi_cmd = gpi_cmd;
868
869 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
870 gpii->ev_cmd_reg;
871 cmd = IS_CHAN_CMD(gpi_cmd) ?
872 GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
873 GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
874 gpi_write_reg(gpii, cmd_reg, cmd);
875 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
876 msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
877
878 if (!timeout) {
879 GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
880 TO_GPI_CMD_STR(gpi_cmd));
881 return -EIO;
882 }
883
884 /* confirm new ch state is correct , if the cmd is a state change cmd */
885 if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
886 return 0;
887 if (IS_CHAN_CMD(gpi_cmd) &&
888 gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
889 return 0;
890 if (!IS_CHAN_CMD(gpi_cmd) &&
891 gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
892 return 0;
893
894 return -EIO;
895}
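/*
 * Typical usage (see gpi_start_chan below):
 *
 *	ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
 *
 * The CH/EV CTRL interrupt handlers update ch_state/ev_state and complete
 * cmd_completion; the resulting state is then compared against
 * gpi_cmd_info[cmd].state unless that entry is STATE_IGNORE.
 */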
896
897/* program transfer ring DB register */
898static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
899 struct gpi_ring *ring,
900 void *wp)
901{
902 struct gpii *gpii = gpii_chan->gpii;
903 phys_addr_t p_wp;
904
905 p_wp = to_physical(ring, wp);
906 gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
907}
908
909/* program event ring DB register */
910static inline void gpi_write_ev_db(struct gpii *gpii,
911 struct gpi_ring *ring,
912 void *wp)
913{
914 phys_addr_t p_wp;
915
916 p_wp = ring->phys_addr + (wp - ring->base);
917 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
918}
919
920/* notify client with generic event */
921static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
922 enum msm_gpi_cb_event event,
923 u64 status)
924{
925 struct gpii *gpii = gpii_chan->gpii;
926 struct gpi_client_info *client_info = &gpii_chan->client_info;
927 struct msm_gpi_cb msm_gpi_cb = {0};
928
929 GPII_ERR(gpii, gpii_chan->chid,
930 "notifying event:%s with status:%llu\n",
931 TO_GPI_CB_EVENT_STR(event), status);
932
933 msm_gpi_cb.cb_event = event;
934 msm_gpi_cb.status = status;
935 msm_gpi_cb.timestamp = sched_clock();
936 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
937 client_info->cb_param);
938}
939
940/* process transfer completion interrupt */
static void gpi_process_ieob(struct gpii *gpii)
{
	gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));

	/* process events based on priority */
	if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
		GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
		gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
		tasklet_schedule(&gpii->ev_task);
	} else {
		GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
		gpi_process_events(gpii);
	}
}
956
957/* process channel control interrupt */
958static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
959{
960 u32 gpii_id = gpii->gpii_id;
961 u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
962 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
963 u32 chid;
964 struct gpii_chan *gpii_chan;
965 u32 state;
966
967 /* clear the status */
968 offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
969 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
970
971 for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
972 if (!(BIT(chid) & ch_irq))
973 continue;
974
975 gpii_chan = &gpii->gpii_chan[chid];
976 GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
977 state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
978 CNTXT_0_CONFIG);
979 state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
980 GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
981
982 /*
983 * CH_CMD_DEALLOC cmd always successful. However cmd does
984 * not change hardware status. So overwriting software state
985 * to default state.
986 */
987 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
988 state = DEFAULT_CH_STATE;
989 gpii_chan->ch_state = state;
990 GPII_VERB(gpii, chid, "setting channel to state:%s\n",
991 TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
992
993 /*
994 * Triggering complete all if ch_state is not a stop in process.
995 * Stop in process is a transition state and we will wait for
996 * stop interrupt before notifying.
997 */
998 if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
999 complete_all(&gpii->cmd_completion);
1000
1001 /* notifying clients if in error state */
1002 if (gpii_chan->ch_state == CH_STATE_ERROR)
1003 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
1004 __LINE__);
1005 }
1006}
1007
1008/* processing gpi level error interrupts */
1009static void gpi_process_glob_err_irq(struct gpii *gpii)
1010{
1011 u32 gpii_id = gpii->gpii_id;
1012 u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
1013 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
1014 u32 error_log;
1015 u32 chid;
1016 struct gpii_chan *gpii_chan;
1017 struct gpi_client_info *client_info;
1018 struct msm_gpi_cb msm_gpi_cb;
1019 struct gpi_error_log_entry *log_entry =
1020 (struct gpi_error_log_entry *)&error_log;
1021
1022 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
1023 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
1024
1025 /* only error interrupt should be set */
1026 if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
1027 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
1028 irq_stts);
1029 goto error_irq;
1030 }
1031
1032 offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
1033 error_log = gpi_read_reg(gpii, gpii->regs + offset);
1034 gpi_write_reg(gpii, gpii->regs + offset, 0);
1035
1036 /* get channel info */
1037 chid = ((struct gpi_error_log_entry *)&error_log)->chid;
1038 if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
1039 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
1040 chid);
1041 goto error_irq;
1042 }
1043
1044 gpii_chan = &gpii->gpii_chan[chid];
1045 client_info = &gpii_chan->client_info;
1046
1047 /* notify client with error log */
1048 msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
1049 msm_gpi_cb.error_log.routine = log_entry->routine;
1050 msm_gpi_cb.error_log.type = log_entry->type;
1051 msm_gpi_cb.error_log.error_code = log_entry->code;
1052 GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
1053 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1054 GPII_ERR(gpii, gpii_chan->chid,
1055 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
1056 log_entry->ee, log_entry->chtype,
1057 msm_gpi_cb.error_log.routine,
1058 msm_gpi_cb.error_log.type,
1059 msm_gpi_cb.error_log.error_code);
1060 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1061 client_info->cb_param);
1062
1063 return;
1064
1065error_irq:
1066 for (chid = 0, gpii_chan = gpii->gpii_chan;
1067 chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
1068 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
1069 irq_stts);
1070}
1071
1072/* gpii interrupt handler */
1073static irqreturn_t gpi_handle_irq(int irq, void *data)
1074{
1075 struct gpii *gpii = data;
1076 u32 type;
1077 unsigned long flags;
1078 u32 offset;
1079 u32 gpii_id = gpii->gpii_id;
1080
1081 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1082
1083 read_lock_irqsave(&gpii->pm_lock, flags);
1084
1085 /*
1086 * States are out of sync to receive interrupt
1087 * while software state is in DISABLE state, bailing out.
1088 */
1089 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1090 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1091 "receive interrupt while in %s state\n",
1092 TO_GPI_PM_STR(gpii->pm_state));
1093 goto exit_irq;
1094 }
1095
1096 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1097 type = gpi_read_reg(gpii, gpii->regs + offset);
1098
1099 do {
1100 GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
1101 type);
1102 /* global gpii error */
1103 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
1104 GPII_ERR(gpii, GPI_DBG_COMMON,
1105 "processing global error irq\n");
1106 gpi_process_glob_err_irq(gpii);
1107 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
1108 }
1109
		/* transfer complete interrupt */
1111 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
1112 GPII_VERB(gpii, GPI_DBG_COMMON,
1113 "process IEOB interrupts\n");
1114 gpi_process_ieob(gpii);
1115 type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
1116 }
1117
		/* event control irq */
1119 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
1120 u32 ev_state;
1121 u32 ev_ch_irq;
1122
1123 GPII_INFO(gpii, GPI_DBG_COMMON,
1124 "processing EV CTRL interrupt\n");
1125 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
1126 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
1127
1128 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
1129 (gpii_id);
1130 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
1131 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
1132 CNTXT_0_CONFIG);
1133 ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
1134 ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
1135
1136 /*
1137 * CMD EV_CMD_DEALLOC is always successful. However
1138 * cmd does not change hardware status. So overwriting
1139 * software state to default state.
1140 */
1141 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
1142 ev_state = DEFAULT_EV_CH_STATE;
1143
1144 gpii->ev_state = ev_state;
1145 GPII_INFO(gpii, GPI_DBG_COMMON,
1146 "setting EV state to %s\n",
1147 TO_GPI_EV_STATE_STR(gpii->ev_state));
1148 complete_all(&gpii->cmd_completion);
1149 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
1150 }
1151
1152 /* channel control irq */
1153 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
1154 GPII_INFO(gpii, GPI_DBG_COMMON,
1155 "process CH CTRL interrupts\n");
1156 gpi_process_ch_ctrl_irq(gpii);
1157 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
1158 }
1159
		if (type) {
1161 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1162 "Unhandled interrupt status:0x%x\n", type);
1163 goto exit_irq;
1164 }
1165 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1166 type = gpi_read_reg(gpii, gpii->regs + offset);
1167 } while (type);
1168
1169exit_irq:
1170 read_unlock_irqrestore(&gpii->pm_lock, flags);
1171 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1172
1173 return IRQ_HANDLED;
1174}
1175
1176/* process qup notification events */
static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
					struct qup_notif_event *notif_event)
{
	struct gpi_client_info *client_info = &gpii_chan->client_info;
	struct msm_gpi_cb msm_gpi_cb;

	GPII_VERB(gpii_chan->gpii, gpii_chan->chid,
		  "status:0x%x time:0x%x count:0x%x\n",
		  notif_event->status, notif_event->time, notif_event->count);

	msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
	msm_gpi_cb.status = notif_event->status;
	msm_gpi_cb.timestamp = notif_event->time;
	msm_gpi_cb.count = notif_event->count;
	GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "sending CB event:%s\n",
		  TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
	client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
			      client_info->cb_param);
}
1196
1197/* process DMA Immediate completion data events */
static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
					struct immediate_data_event *imed_event)
{
	struct gpii *gpii = gpii_chan->gpii;
	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
	struct virt_dma_desc *vd;
	struct gpi_desc *gpi_desc;
	void *tre = ch_ring->base +
		(ch_ring->el_size * imed_event->tre_index);
	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
	unsigned long flags;

	/*
	 * If channel not active don't process event but let
	 * client know pending event is available
	 */
	if (gpii_chan->pm_state != ACTIVE_STATE) {
		GPII_ERR(gpii, gpii_chan->chid,
			 "skipping processing event because ch @ %s state\n",
			 TO_GPI_PM_STR(gpii_chan->pm_state));
		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
				      __LINE__);
		return;
	}

	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	vd = vchan_next_desc(&gpii_chan->vc);
	if (!vd) {
		struct gpi_ere *gpi_ere;
		struct msm_gpi_tre *gpi_tre;

		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		GPII_ERR(gpii, gpii_chan->chid,
			 "event without a pending descriptor!\n");
		gpi_ere = (struct gpi_ere *)imed_event;
		GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
			 gpi_ere->dword[0], gpi_ere->dword[1],
			 gpi_ere->dword[2], gpi_ere->dword[3]);
		gpi_tre = tre;
		GPII_ERR(gpii, gpii_chan->chid,
			 "Pending TRE: %08x %08x %08x %08x\n",
			 gpi_tre->dword[0], gpi_tre->dword[1],
			 gpi_tre->dword[2], gpi_tre->dword[3]);
		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
				      __LINE__);
		return;
	}
	gpi_desc = to_gpi_desc(vd);

	/* Event TR RP gen. don't match descriptor TR */
	if (gpi_desc->wp != tre) {
		phys_addr_t p_wp = to_physical(ch_ring, gpi_desc->wp);
		phys_addr_t p_tre = to_physical(ch_ring, tre);

		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		GPII_ERR(gpii, gpii_chan->chid,
			 "EOT/EOB received for wrong TRE %pa != %pa\n",
			 &p_wp, &p_tre);
		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
				      __LINE__);
		return;
	}

	list_del(&vd->node);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);

	/*
	 * RP pointed by Event is to last TRE processed,
	 * we need to update ring rp to tre + 1
	 */
	tre += ch_ring->el_size;
	if (tre >= (ch_ring->base + ch_ring->len))
		tre = ch_ring->base;
	ch_ring->rp = tre;

	/* make sure rp updates are immediately visible to all cores */
	smp_wmb();

	tx_cb_param = vd->tx.callback_param;
	if (vd->tx.callback && tx_cb_param) {
		struct msm_gpi_tre *imed_tre = &tx_cb_param->imed_tre;

		GPII_VERB(gpii, gpii_chan->chid,
			  "cb_length:%u compl_code:0x%x status:0x%x\n",
			  imed_event->length, imed_event->code,
			  imed_event->status);
		/* Update immediate data if any from event */
		*imed_tre = *((struct msm_gpi_tre *)imed_event);
		tx_cb_param->length = imed_event->length;
		tx_cb_param->completion_code = imed_event->code;
		tx_cb_param->status = imed_event->status;
		vd->tx.callback(tx_cb_param);
	}
	kfree(gpi_desc);
}
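/*
 * Note: for immediate-data completions the payload travels in the event
 * itself (data_bytes[] of struct immediate_data_event); the code above
 * copies the whole event into tx_cb_param->imed_tre before invoking the
 * client callback, so the client reads the received bytes from there
 * rather than from a DMA buffer.
 */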
1294
1295/* processing transfer completion events */
static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
					 struct xfer_compl_event *compl_event)
{
	struct gpii *gpii = gpii_chan->gpii;
	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
	void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
	struct virt_dma_desc *vd;
	struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
	struct gpi_desc *gpi_desc;
	unsigned long flags;

	/* only process events on active channel */
	if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
		GPII_ERR(gpii, gpii_chan->chid,
			 "skipping processing event because ch @ %s state\n",
			 TO_GPI_PM_STR(gpii_chan->pm_state));
		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
				      __LINE__);
		return;
	}

	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	vd = vchan_next_desc(&gpii_chan->vc);
	if (!vd) {
		struct gpi_ere *gpi_ere;

		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		GPII_ERR(gpii, gpii_chan->chid,
			 "Event without a pending descriptor!\n");
		gpi_ere = (struct gpi_ere *)compl_event;
		GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
			 gpi_ere->dword[0], gpi_ere->dword[1],
			 gpi_ere->dword[2], gpi_ere->dword[3]);
		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
				      __LINE__);
		return;
	}

	gpi_desc = to_gpi_desc(vd);

	/* TRE Event generated didn't match descriptor's TRE */
	if (gpi_desc->wp != ev_rp) {
		phys_addr_t p_wp = to_physical(ch_ring, gpi_desc->wp);
		phys_addr_t p_ev_rp = to_physical(ch_ring, ev_rp);

		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		GPII_ERR(gpii, gpii_chan->chid,
			 "EOT/EOB received for wrong TRE %pa != %pa\n",
			 &p_wp, &p_ev_rp);
		gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
				      __LINE__);
		return;
	}

	list_del(&vd->node);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);

	/*
	 * RP pointed by Event is to last TRE processed,
	 * we need to update ring rp to ev_rp + 1
	 */
	ev_rp += ch_ring->el_size;
	if (ev_rp >= (ch_ring->base + ch_ring->len))
		ev_rp = ch_ring->base;
	ch_ring->rp = ev_rp;

	/* update must be visible to other cores */
	smp_wmb();

	tx_cb_param = vd->tx.callback_param;
	if (vd->tx.callback && tx_cb_param) {
		GPII_VERB(gpii, gpii_chan->chid,
			  "cb_length:%u compl_code:0x%x status:0x%x\n",
			  compl_event->length, compl_event->code,
			  compl_event->status);
		tx_cb_param->length = compl_event->length;
		tx_cb_param->completion_code = compl_event->code;
		tx_cb_param->status = compl_event->status;
		vd->tx.callback(tx_cb_param);
	}
	kfree(gpi_desc);
}
1379
1380/* process all events */
static void gpi_process_events(struct gpii *gpii)
{
	struct gpi_ring *ev_ring = &gpii->ev_ring;
	phys_addr_t cntxt_rp, local_rp;
	void *rp;
	union gpi_event *gpi_event;
	struct gpii_chan *gpii_chan;
	u32 chid, type;

	cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
	rp = to_virtual(ev_ring, cntxt_rp);
	local_rp = to_physical(ev_ring, ev_ring->rp);

	GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp:%pa local_rp:%pa\n",
		  &cntxt_rp, &local_rp);

	do {
		while (rp != ev_ring->rp) {
			gpi_event = ev_ring->rp;
			chid = gpi_event->xfer_compl_event.chid;
			type = gpi_event->xfer_compl_event.type;
			GPII_VERB(gpii, GPI_DBG_COMMON,
				  "chid:%u type:0x%x %08x %08x %08x %08x\n",
				  chid, type,
				  gpi_event->gpi_ere.dword[0],
				  gpi_event->gpi_ere.dword[1],
				  gpi_event->gpi_ere.dword[2],
				  gpi_event->gpi_ere.dword[3]);

			switch (type) {
			case XFER_COMPLETE_EV_TYPE:
				gpii_chan = &gpii->gpii_chan[chid];
				gpi_process_xfer_compl_event(gpii_chan,
						&gpi_event->xfer_compl_event);
				break;
			case STALE_EV_TYPE:
				GPII_VERB(gpii, GPI_DBG_COMMON,
					  "stale event, not processing\n");
				break;
			case IMMEDIATE_DATA_EV_TYPE:
				gpii_chan = &gpii->gpii_chan[chid];
				gpi_process_imed_data_event(gpii_chan,
					&gpi_event->immediate_data_event);
				break;
			case QUP_NOTIF_EV_TYPE:
				gpii_chan = &gpii->gpii_chan[chid];
				gpi_process_qup_notif_event(gpii_chan,
						&gpi_event->qup_notif_event);
				break;
			default:
				GPII_VERB(gpii, GPI_DBG_COMMON,
					  "not supported event type:0x%x\n",
					  type);
			}
			gpi_ring_recycle_ev_element(ev_ring);
		}
		gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);

		/* clear pending IEOB events */
		gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));

		cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
		rp = to_virtual(ev_ring, cntxt_rp);

	} while (rp != ev_ring->rp);

	GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:%pa\n", &cntxt_rp);
}
1449
1450/* processing events using tasklet */
1451static void gpi_ev_tasklet(unsigned long data)
1452{
1453 struct gpii *gpii = (struct gpii *)data;
1454
1455 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1456
1457 read_lock_bh(&gpii->pm_lock);
1458 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1459 read_unlock_bh(&gpii->pm_lock);
1460 GPII_ERR(gpii, GPI_DBG_COMMON,
1461 "not processing any events, pm_state:%s\n",
1462 TO_GPI_PM_STR(gpii->pm_state));
1463 return;
1464 }
1465
1466 /* process the events */
1467 gpi_process_events(gpii);
1468
1469 /* enable IEOB, switching back to interrupts */
1470 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1471 read_unlock_bh(&gpii->pm_lock);
1472
1473 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1474}
1475
1476/* marks all pending events for the channel as stale */
1477void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
1478{
1479 struct gpii *gpii = gpii_chan->gpii;
1480 struct gpi_ring *ev_ring = &gpii->ev_ring;
1481 void *ev_rp;
1482 u32 cntxt_rp, local_rp;
1483
1484 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1485 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1486
1487 ev_rp = ev_ring->rp;
1488 local_rp = (u32)to_physical(ev_ring, ev_rp);
1489 while (local_rp != cntxt_rp) {
1490 union gpi_event *gpi_event = ev_rp;
1491 u32 chid = gpi_event->xfer_compl_event.chid;
1492
1493 if (chid == gpii_chan->chid)
1494 gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1495 ev_rp += ev_ring->el_size;
1496 if (ev_rp >= (ev_ring->base + ev_ring->len))
1497 ev_rp = ev_ring->base;
1498 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1499 local_rp = (u32)to_physical(ev_ring, ev_rp);
1500 }
1501}
1502
1503/* reset sw state and issue channel reset or de-alloc */
1504static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
1505{
1506 struct gpii *gpii = gpii_chan->gpii;
1507 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
	unsigned long flags;
1509 LIST_HEAD(list);
1510 int ret;
1511
1512 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1513 ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
1514 if (ret) {
1515 GPII_ERR(gpii, gpii_chan->chid,
1516 "Error with cmd:%s ret:%d\n",
1517 TO_GPI_CMD_STR(gpi_cmd), ret);
1518 return ret;
1519 }
1520
1521 /* initialize the local ring ptrs */
	ch_ring->rp = ch_ring->base;
	ch_ring->wp = ch_ring->base;

	/* visible to other cores */
1526 smp_wmb();
1527
1528 /* check event ring for any stale events */
1529 write_lock_irq(&gpii->pm_lock);
1530 gpi_mark_stale_events(gpii_chan);
1531
1532 /* remove all async descriptors */
1533 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1534 vchan_get_all_descriptors(&gpii_chan->vc, &list);
1535 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1536 write_unlock_irq(&gpii->pm_lock);
1537 vchan_dma_desc_free_list(&gpii_chan->vc, &list);
1538
1539 return 0;
1540}
1541
1542static int gpi_start_chan(struct gpii_chan *gpii_chan)
1543{
1544 struct gpii *gpii = gpii_chan->gpii;
1545 int ret;
1546
1547 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1548
1549 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
1550 if (ret) {
1551 GPII_ERR(gpii, gpii_chan->chid,
1552 "Error with cmd:%s ret:%d\n",
1553 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1554 return ret;
1555 }
1556
1557 /* gpii CH is active now */
1558 write_lock_irq(&gpii->pm_lock);
1559 gpii_chan->pm_state = ACTIVE_STATE;
1560 write_unlock_irq(&gpii->pm_lock);
1561
1562 return 0;
1563}
1564
1565/* allocate and configure the transfer channel */
1566static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
1567{
1568 struct gpii *gpii = gpii_chan->gpii;
1569 struct gpi_ring *ring = &gpii_chan->ch_ring;
1570 int i;
1571 int ret;
1572 struct {
1573 void *base;
1574 int offset;
1575 u32 val;
1576 } ch_reg[] = {
1577 {
1578 gpii_chan->ch_cntxt_base_reg,
1579 CNTXT_0_CONFIG,
1580 GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
1581 gpii_chan->dir,
1582 GPI_CHTYPE_PROTO_GPI),
1583 },
1584 {
1585 gpii_chan->ch_cntxt_base_reg,
1586 CNTXT_1_R_LENGTH,
1587 ring->len,
1588 },
1589 {
1590 gpii_chan->ch_cntxt_base_reg,
1591 CNTXT_2_RING_BASE_LSB,
1592 (u32)ring->phys_addr,
1593 },
1594 {
1595 gpii_chan->ch_cntxt_base_reg,
1596 CNTXT_3_RING_BASE_MSB,
			MSM_GPI_RING_PHYS_ADDR_UPPER(ring),
		},
		{ /* program MSB of DB register with ring base */
			gpii_chan->ch_cntxt_db_reg,
			CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
			MSM_GPI_RING_PHYS_ADDR_UPPER(ring),
		},
1604 {
1605 gpii->regs,
1606 GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
1607 gpii_chan->chid),
1608 GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
1609 gpii_chan->protocol,
1610 gpii_chan->seid),
1611 },
1612 {
1613 gpii->regs,
1614 GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
1615 gpii_chan->chid),
1616 0,
1617 },
1618 {
1619 gpii->regs,
1620 GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
1621 gpii_chan->chid),
1622 0,
1623 },
1624 {
1625 gpii->regs,
1626 GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
1627 gpii_chan->chid),
1628 0,
1629 },
1630 {
1631 gpii->regs,
1632 GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
1633 gpii_chan->chid),
1634 1,
1635 },
1636 { NULL },
1637 };
1638
1639 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1640
1641 if (send_alloc_cmd) {
1642 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
1643 if (ret) {
1644 GPII_ERR(gpii, gpii_chan->chid,
1645 "Error with cmd:%s ret:%d\n",
1646 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1647 return ret;
1648 }
1649 }
1650
1651 /* program channel cntxt registers */
1652 for (i = 0; ch_reg[i].base; i++)
1653 gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
1654 ch_reg[i].val);
1655 /* flush all the writes */
1656 wmb();
1657 return 0;
1658}
1659
1660/* allocate and configure event ring */
1661static int gpi_alloc_ev_chan(struct gpii *gpii)
1662{
1663 struct gpi_ring *ring = &gpii->ev_ring;
1664 int i;
1665 int ret;
1666 struct {
1667 void *base;
1668 int offset;
1669 u32 val;
1670 } ev_reg[] = {
1671 {
1672 gpii->ev_cntxt_base_reg,
1673 CNTXT_0_CONFIG,
1674 GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
1675 GPI_INTTYPE_IRQ,
1676 GPI_CHTYPE_GPI_EV),
1677 },
1678 {
1679 gpii->ev_cntxt_base_reg,
1680 CNTXT_1_R_LENGTH,
1681 ring->len,
1682 },
1683 {
1684 gpii->ev_cntxt_base_reg,
1685 CNTXT_2_RING_BASE_LSB,
1686 (u32)ring->phys_addr,
1687 },
1688 {
1689 gpii->ev_cntxt_base_reg,
1690 CNTXT_3_RING_BASE_MSB,
			MSM_GPI_RING_PHYS_ADDR_UPPER(ring),
		},
		{
			/* program db msg with ring base msb */
			gpii->ev_cntxt_db_reg,
			CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
			MSM_GPI_RING_PHYS_ADDR_UPPER(ring),
		},
1699 {
1700 gpii->ev_cntxt_base_reg,
1701 CNTXT_8_RING_INT_MOD,
1702 0,
1703 },
1704 {
1705 gpii->ev_cntxt_base_reg,
1706 CNTXT_10_RING_MSI_LSB,
1707 0,
1708 },
1709 {
1710 gpii->ev_cntxt_base_reg,
1711 CNTXT_11_RING_MSI_MSB,
1712 0,
1713 },
1714 {
1715 gpii->ev_cntxt_base_reg,
1716 CNTXT_8_RING_INT_MOD,
1717 0,
1718 },
1719 {
1720 gpii->ev_cntxt_base_reg,
1721 CNTXT_12_RING_RP_UPDATE_LSB,
1722 0,
1723 },
1724 {
1725 gpii->ev_cntxt_base_reg,
1726 CNTXT_13_RING_RP_UPDATE_MSB,
1727 0,
1728 },
1729 { NULL },
1730 };
1731
1732 GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
1733
1734 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1735 if (ret) {
1736 GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
1737 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1738 return ret;
1739 }
1740
1741 /* program event context */
1742 for (i = 0; ev_reg[i].base; i++)
1743 gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
1744 ev_reg[i].val);
1745
1746 /* add events to ring */
1747 ring->wp = (ring->base + ring->len - ring->el_size);
1748
1749 /* flush all the writes */
1750 wmb();
1751
1752 /* gpii is active now */
1753 write_lock_irq(&gpii->pm_lock);
1754 gpii->pm_state = ACTIVE_STATE;
1755 write_unlock_irq(&gpii->pm_lock);
1756 gpi_write_ev_db(gpii, ring, ring->wp);
1757
1758 return 0;
1759}
1760
1761/* calculate # of ERE/TRE available to queue */
1762static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1763{
1764 int elements = 0;
1765
1766 if (ring->wp < ring->rp)
1767 elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1768 else {
1769 elements = (ring->rp - ring->base) / ring->el_size;
1770 elements += ((ring->base + ring->len - ring->wp) /
1771 ring->el_size) - 1;
1772 }
1773
1774 return elements;
1775}
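/*
 * One slot is always left unused so a full ring can be distinguished from
 * an empty one. Example: for a ring of 8 elements with rp == wp == base,
 * the helper above returns 7; once 7 TREs are queued (wp one slot behind
 * rp) it returns 0 and gpi_ring_add_element() fails with -ENOMEM.
 */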
1776
1777static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1778{
1779
1780 if (gpi_ring_num_elements_avail(ring) <= 0)
1781 return -ENOMEM;
1782
1783 *wp = ring->wp;
1784 ring->wp += ring->el_size;
1785 if (ring->wp >= (ring->base + ring->len))
1786 ring->wp = ring->base;
1787
1788 /* visible to other cores */
1789 smp_wmb();
1790
1791 return 0;
1792}
1793
1794static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1795{
1796 /* Update the WP */
1797 ring->wp += ring->el_size;
1798 if (ring->wp >= (ring->base + ring->len))
1799 ring->wp = ring->base;
1800
1801 /* Update the RP */
1802 ring->rp += ring->el_size;
1803 if (ring->rp >= (ring->base + ring->len))
1804 ring->rp = ring->base;
1805
1806 /* visible to other cores */
1807 smp_wmb();
1808}
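/*
 * Event-ring recycling advances rp and wp together: the slot that was just
 * consumed is handed straight back to the hardware, and gpi_process_events()
 * rings the event doorbell with the updated wp once a batch is drained.
 */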
1809
static void gpi_free_ring(struct gpi_ring *ring,
			  struct gpii *gpii)
{
	dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
			  ring->pre_aligned, ring->dma_handle);
	memset(ring, 0, sizeof(*ring));
}
1817
1818/* allocate memory for transfer and event rings */
1819static int gpi_alloc_ring(struct gpi_ring *ring,
1820 u32 elements,
1821 u32 el_size,
Sujeev Dias8fc26002017-11-29 20:51:40 -08001822 struct gpii *gpii)
Sujeev Diasdd66ce02016-09-07 11:35:11 -07001823{
1824 u64 len = elements * el_size;
1825 int bit;
1826
Sujeev Dias8fc26002017-11-29 20:51:40 -08001827 /* ring len must be power of 2 */
1828 bit = find_last_bit((unsigned long *)&len, 32);
1829 if (((1 << bit) - 1) & len)
1830 bit++;
1831 len = 1 << bit;
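 /*
  * over-allocate by (len - 1) bytes so a len-aligned block of len bytes
  * always fits inside the coherent buffer, wherever dma_handle lands;
  * e.g. 64 elements of 16 bytes give len = 1024, alloc_size = 2047 and
  * a ring base aligned to a 1 KB boundary
  */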
1832 ring->alloc_size = (len + (len - 1));
1833 GPII_INFO(gpii, GPI_DBG_COMMON,
Siva Kumar Akkireddi15fb3942018-08-02 13:33:35 +05301834 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zx\n",
Sujeev Dias8fc26002017-11-29 20:51:40 -08001835 elements, el_size, (elements * el_size), len,
1836 ring->alloc_size);
1837 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1838 ring->alloc_size,
1839 &ring->dma_handle, GFP_KERNEL);
1840 if (!ring->pre_aligned) {
1841 GPII_CRITIC(gpii, GPI_DBG_COMMON,
Siva Kumar Akkireddi15fb3942018-08-02 13:33:35 +05301842 "could not alloc size:%zx mem for ring\n",
Sujeev Dias8fc26002017-11-29 20:51:40 -08001843 ring->alloc_size);
1844 return -ENOMEM;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07001845 }
1846
Sujeev Dias8fc26002017-11-29 20:51:40 -08001847 /* align the physical mem */
1848 ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1849 ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07001850 ring->rp = ring->base;
1851 ring->wp = ring->base;
1852 ring->len = len;
1853 ring->el_size = el_size;
1854 ring->elements = ring->len / ring->el_size;
1855 memset(ring->base, 0, ring->len);
1856 ring->configured = true;
1857
1858 /* update to other cores */
1859 smp_wmb();
1860
1861 GPII_INFO(gpii, GPI_DBG_COMMON,
Siva Kumar Akkireddi15fb3942018-08-02 13:33:35 +05301862 "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
1863 &ring->dma_handle, &ring->phys_addr, ring->len, ring->el_size,
Sujeev Diasdd66ce02016-09-07 11:35:11 -07001864 ring->elements);
1865
1866 return 0;
1867}
1868
1869/* copy tre into transfer ring */
1870static void gpi_queue_xfer(struct gpii *gpii,
1871 struct gpii_chan *gpii_chan,
1872 struct msm_gpi_tre *gpi_tre,
Sujeev Dias8fc26002017-11-29 20:51:40 -08001873 void **wp)
Sujeev Diasdd66ce02016-09-07 11:35:11 -07001874{
1875 struct msm_gpi_tre *ch_tre;
1876 int ret;
1877
1878 /* get next tre location we can copy */
1879 ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
1880 if (unlikely(ret)) {
1881 GPII_CRITIC(gpii, gpii_chan->chid,
1882 "Error adding ring element to xfer ring\n");
1883 return;
1884 }
Sujeev Diasdd66ce02016-09-07 11:35:11 -07001885
1886 /* copy the tre info */
1887 memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
Sujeev Diasdd66ce02016-09-07 11:35:11 -07001888 *wp = ch_tre;
1889}
1890
1891/* reset and restart transfer channel */
1892int gpi_terminate_all(struct dma_chan *chan)
1893{
1894 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1895 struct gpii *gpii = gpii_chan->gpii;
1896 int schid, echid, i;
1897 int ret = 0;
1898
1899 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1900 mutex_lock(&gpii->ctrl_lock);
1901
1902 /*
1903 * treat both channels as a group if the protocol is not UART;
1904 * STOP, RESET, and START need to be issued in lockstep
1905 */
1906 schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
1907 echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
1908 MAX_CHANNELS_PER_GPII;
1909
1910 /* stop the channel */
1911 for (i = schid; i < echid; i++) {
1912 gpii_chan = &gpii->gpii_chan[i];
1913
1914 /* disable ch state so no more TRE processing */
1915 write_lock_irq(&gpii->pm_lock);
1916 gpii_chan->pm_state = PREPARE_TERMINATE;
1917 write_unlock_irq(&gpii->pm_lock);
1918
1919 /* send command to Stop the channel */
1920 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
1921 if (ret)
1922 GPII_ERR(gpii, gpii_chan->chid,
1923 "Error Stopping Channel:%d resetting anyway\n",
1924 ret);
1925 }
1926
1927 /* reset the channels (clears any pending tre) */
1928 for (i = schid; i < echid; i++) {
1929 gpii_chan = &gpii->gpii_chan[i];
1930
1931 ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
1932 if (ret) {
1933 GPII_ERR(gpii, gpii_chan->chid,
1934 "Error resetting channel ret:%d\n", ret);
1935 goto terminate_exit;
1936 }
1937
1938 /* reprogram channel CNTXT */
1939 ret = gpi_alloc_chan(gpii_chan, false);
1940 if (ret) {
1941 GPII_ERR(gpii, gpii_chan->chid,
1942 "Error alloc_channel ret:%d\n", ret);
1943 goto terminate_exit;
1944 }
1945 }
1946
1947 /* restart the channels */
1948 for (i = schid; i < echid; i++) {
1949 gpii_chan = &gpii->gpii_chan[i];
1950
1951 ret = gpi_start_chan(gpii_chan);
1952 if (ret) {
1953 GPII_ERR(gpii, gpii_chan->chid,
1954 "Error Starting Channel ret:%d\n", ret);
1955 goto terminate_exit;
1956 }
1957 }
1958
1959terminate_exit:
1960 mutex_unlock(&gpii->ctrl_lock);
1961 return ret;
1962}
1963
1964/* pause dma transfer for all channels */
1965static int gpi_pause(struct dma_chan *chan)
1966{
1967 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1968 struct gpii *gpii = gpii_chan->gpii;
1969 int i, ret;
1970
1971 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1972 mutex_lock(&gpii->ctrl_lock);
1973
1974 /*
1975 * pause/resume are per gpii not per channel, so
1976 * client needs to call pause only once
1977 */
1978 if (gpii->pm_state == PAUSE_STATE) {
1979 GPII_INFO(gpii, gpii_chan->chid,
1980 "channel is already paused\n");
1981 mutex_unlock(&gpii->ctrl_lock);
1982 return 0;
1983 }
1984
1985 /* send stop command to stop the channels */
1986 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
1987 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
1988 if (ret) {
1989 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
1990 "Error stopping chan, ret:%d\n", ret);
1991 mutex_unlock(&gpii->ctrl_lock);
1992 return ret;
1993 }
1994 }
1995
1996 disable_irq(gpii->irq);
1997
1998 /* Wait for threads to complete out */
1999 tasklet_kill(&gpii->ev_task);
2000
2001 write_lock_irq(&gpii->pm_lock);
2002 gpii->pm_state = PAUSE_STATE;
2003 write_unlock_irq(&gpii->pm_lock);
2004 mutex_unlock(&gpii->ctrl_lock);
2005
2006 return 0;
2007}
2008
2009/* resume dma transfer */
2010static int gpi_resume(struct dma_chan *chan)
2011{
2012 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2013 struct gpii *gpii = gpii_chan->gpii;
2014 int i;
2015 int ret;
2016
2017 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2018
2019 mutex_lock(&gpii->ctrl_lock);
2020 if (gpii->pm_state == ACTIVE_STATE) {
2021 GPII_INFO(gpii, gpii_chan->chid,
2022 "channel is already active\n");
2023 mutex_unlock(&gpii->ctrl_lock);
2024 return 0;
2025 }
2026
2027 enable_irq(gpii->irq);
2028
2029 /* send start command to start the channels */
2030 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2031 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
2032 if (ret) {
2033 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2034 "Erro starting chan, ret:%d\n", ret);
2035 mutex_unlock(&gpii->ctrl_lock);
2036 return ret;
2037 }
2038 }
2039
2040 write_lock_irq(&gpii->pm_lock);
2041 gpii->pm_state = ACTIVE_STATE;
2042 write_unlock_irq(&gpii->pm_lock);
2043 mutex_unlock(&gpii->ctrl_lock);
2044
2045 return 0;
2046}
2047
2048void gpi_desc_free(struct virt_dma_desc *vd)
2049{
2050 struct gpi_desc *gpi_desc = to_gpi_desc(vd);
2051
2052 kfree(gpi_desc);
2053}
2054
2055/* prepare a descriptor and copy client TREs into the transfer ring */
2056struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
2057 struct scatterlist *sgl,
2058 unsigned int sg_len,
2059 enum dma_transfer_direction direction,
2060 unsigned long flags,
2061 void *context)
2062{
2063 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2064 struct gpii *gpii = gpii_chan->gpii;
Sujeev Dias8fc26002017-11-29 20:51:40 -08002065 u32 nr;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002066 u32 nr_req = 0;
2067 int i, j;
2068 struct scatterlist *sg;
2069 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002070 void *tre, *wp = NULL;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002071 const gfp_t gfp = GFP_ATOMIC;
2072 struct gpi_desc *gpi_desc;
Siva Kumar Akkireddi15fb3942018-08-02 13:33:35 +05302073#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
2074 phys_addr_t p_wp, p_rp;
2075#endif
2076
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002077
2078 GPII_VERB(gpii, gpii_chan->chid, "enter\n");
2079
2080 if (!is_slave_direction(direction)) {
2081 GPII_ERR(gpii, gpii_chan->chid,
2082 "invalid dma direction: %d\n", direction);
2083 return NULL;
2084 }
2085
2086 /* calculate # of elements required & available */
2087 nr = gpi_ring_num_elements_avail(ch_ring);
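 /* each sg entry carries client-built TREs, so its length is counted in whole TRE elements */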
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002088 for_each_sg(sgl, sg, sg_len, i) {
2089 GPII_VERB(gpii, gpii_chan->chid,
2090 "%d of %u len:%u\n", i, sg_len, sg->length);
2091 nr_req += (sg->length / ch_ring->el_size);
2092 }
Sujeev Dias8fc26002017-11-29 20:51:40 -08002093 GPII_VERB(gpii, gpii_chan->chid, "el avail:%u req:%u\n", nr, nr_req);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002094
Sujeev Dias8fc26002017-11-29 20:51:40 -08002095 if (nr < nr_req) {
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002096 GPII_ERR(gpii, gpii_chan->chid,
Sujeev Dias8fc26002017-11-29 20:51:40 -08002097 "not enough space in ring, avail:%u required:%u\n",
2098 nr, nr_req);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002099 return NULL;
2100 }
2101
2102 gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
2103 if (!gpi_desc) {
2104 GPII_ERR(gpii, gpii_chan->chid,
2105 "out of memory for descriptor\n");
2106 return NULL;
2107 }
2108
2109 /* copy each tre into transfer ring */
2110 for_each_sg(sgl, sg, sg_len, i)
2111 for (j = 0, tre = sg_virt(sg); j < sg->length;
2112 j += ch_ring->el_size, tre += ch_ring->el_size)
Sujeev Dias8fc26002017-11-29 20:51:40 -08002113 gpi_queue_xfer(gpii, gpii_chan, tre, &wp);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002114
2115 /* set up the descriptor */
2116 gpi_desc->db = ch_ring->wp;
2117 gpi_desc->wp = wp;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002118 gpi_desc->gpii_chan = gpii_chan;
Siva Kumar Akkireddi15fb3942018-08-02 13:33:35 +05302119#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
2120 p_wp = to_physical(ch_ring, ch_ring->wp);
2121 p_rp = to_physical(ch_ring, ch_ring->rp);
2122 GPII_VERB(gpii, gpii_chan->chid, "exit wp:%pa rp:%pa\n",
2123 &p_wp, &p_rp);
2124#endif
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002125
2126 return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
2127}
2128
2129/* ring the transfer ring doorbell to begin the transfer */
2130static void gpi_issue_pending(struct dma_chan *chan)
2131{
2132 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2133 struct gpii *gpii = gpii_chan->gpii;
2134 unsigned long flags, pm_lock_flags;
2135 struct virt_dma_desc *vd = NULL;
2136 struct gpi_desc *gpi_desc;
2137
2138 GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
2139
2140 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
2141
2142 /* move all submitted descriptors to the issued list */
2143 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
2144 if (vchan_issue_pending(&gpii_chan->vc))
2145 vd = list_last_entry(&gpii_chan->vc.desc_issued,
2146 struct virt_dma_desc, node);
2147 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
2148
2149 /* nothing to do, the issued list is empty */
2150 if (!vd) {
2151 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2152 GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
2153 return;
2154 }
2155
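 /*
  * ringing the doorbell with the newest descriptor's db value makes
  * every TRE queued before it visible to the hardware, so only the
  * last issued descriptor needs to be looked at
  */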
2156 gpi_desc = to_gpi_desc(vd);
2157 gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
2158 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2159}
2160
2161/* configure or issue async command */
2162static int gpi_config(struct dma_chan *chan,
2163 struct dma_slave_config *config)
2164{
2165 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2166 struct gpii *gpii = gpii_chan->gpii;
2167 struct msm_gpi_ctrl *gpi_ctrl = chan->private;
2168 const int ev_factor = gpii->gpi_dev->ev_factor;
2169 u32 elements;
2170 int i = 0;
2171 int ret = 0;
2172
2173 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2174 if (!gpi_ctrl) {
2175 GPII_ERR(gpii, gpii_chan->chid,
2176 "no config ctrl data provided");
2177 return -EINVAL;
2178 }
2179
2180 mutex_lock(&gpii->ctrl_lock);
2181
2182 switch (gpi_ctrl->cmd) {
2183 case MSM_GPI_INIT:
2184 GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
2185
2186 gpii_chan->client_info.callback = gpi_ctrl->init.callback;
2187 gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
2188 gpii_chan->pm_state = CONFIG_STATE;
2189
2190 /* check if both channels are configured before continue */
2191 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2192 if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
2193 goto exit_gpi_init;
2194
2195 /* configure to highest priority from two channels */
2196 gpii->ev_priority = min(gpii->gpii_chan[0].priority,
2197 gpii->gpii_chan[1].priority);
2198
2199 /* protocol must be same for both channels */
2200 if (gpii->gpii_chan[0].protocol !=
2201 gpii->gpii_chan[1].protocol) {
2202 GPII_ERR(gpii, gpii_chan->chid,
2203 "protocol did not match protocol %u != %u\n",
2204 gpii->gpii_chan[0].protocol,
2205 gpii->gpii_chan[1].protocol);
2206 ret = -EINVAL;
2207 goto exit_gpi_init;
2208 }
2209 gpii->protocol = gpii_chan->protocol;
2210
2211 /* allocate memory for event ring */
2212 elements = max(gpii->gpii_chan[0].req_tres,
2213 gpii->gpii_chan[1].req_tres);
2214 ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
Sujeev Dias8fc26002017-11-29 20:51:40 -08002215 sizeof(union gpi_event), gpii);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002216 if (ret) {
2217 GPII_ERR(gpii, gpii_chan->chid,
2218 "error allocating mem for ev ring\n");
2219 goto exit_gpi_init;
2220 }
2221
2222 /* configure interrupts */
2223 write_lock_irq(&gpii->pm_lock);
2224 gpii->pm_state = PREPARE_HARDWARE;
2225 write_unlock_irq(&gpii->pm_lock);
2226 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
2227 if (ret) {
2228 GPII_ERR(gpii, gpii_chan->chid,
2229 "error config. interrupts, ret:%d\n", ret);
2230 goto error_config_int;
2231 }
2232
2233 /* allocate and program the event ring context in hardware */
2234 ret = gpi_alloc_ev_chan(gpii);
2235 if (ret) {
2236 GPII_ERR(gpii, gpii_chan->chid,
2237 "error alloc_ev_chan:%d\n", ret);
2238 goto error_alloc_ev_ring;
2239 }
2240
2241 /* Allocate all channels */
2242 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2243 ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
2244 if (ret) {
2245 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2246 "Error allocating chan:%d\n", ret);
2247 goto error_alloc_chan;
2248 }
2249 }
2250
2251 /* start channels */
2252 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2253 ret = gpi_start_chan(&gpii->gpii_chan[i]);
2254 if (ret) {
2255 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2256 "Error start chan:%d\n", ret);
2257 goto error_start_chan;
2258 }
2259 }
2260
2261 break;
2262 case MSM_GPI_CMD_UART_SW_STALE:
2263 GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
2264 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
2265 break;
2266 case MSM_GPI_CMD_UART_RFR_READY:
2267 GPII_INFO(gpii, gpii_chan->chid,
2268 "sending UART RFR READY cmd\n");
2269 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
2270 break;
2271 case MSM_GPI_CMD_UART_RFR_NOT_READY:
2272 GPII_INFO(gpii, gpii_chan->chid,
2273 "sending UART RFR READY NOT READY cmd\n");
2274 ret = gpi_send_cmd(gpii, gpii_chan,
2275 GPI_CH_CMD_UART_RFR_NOT_READY);
2276 break;
2277 default:
2278 GPII_ERR(gpii, gpii_chan->chid,
2279 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
2280 ret = -EINVAL;
2281 }
2282
2283 mutex_unlock(&gpii->ctrl_lock);
2284 return ret;
2285
2286error_start_chan:
2287 for (i = i - 1; i >= 0; i--) {
2288 gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2289 gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_RESET);
2290 }
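 /* both channels were allocated, so fall through and de-allocate them both */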
2291 i = 2;
2292error_alloc_chan:
2293 for (i = i - 1; i >= 0; i--)
2294 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2295error_alloc_ev_ring:
2296 gpi_disable_interrupts(gpii);
2297error_config_int:
2298 gpi_free_ring(&gpii->ev_ring, gpii);
2299exit_gpi_init:
2300 mutex_unlock(&gpii->ctrl_lock);
2301 return ret;
2302}
2303
2304/* release all channel resources */
2305static void gpi_free_chan_resources(struct dma_chan *chan)
2306{
2307 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2308 struct gpii *gpii = gpii_chan->gpii;
2309 enum gpi_pm_state cur_state;
2310 int ret, i;
2311
2312 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2313
2314 mutex_lock(&gpii->ctrl_lock);
2315
2316 cur_state = gpii_chan->pm_state;
2317
2318 /* disable ch state so no more TRE processing for this channel */
2319 write_lock_irq(&gpii->pm_lock);
2320 gpii_chan->pm_state = PREPARE_TERMINATE;
2321 write_unlock_irq(&gpii->pm_lock);
2322
2323 /* attempt a graceful hardware shutdown */
2324 if (cur_state == ACTIVE_STATE) {
2325 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2326 if (ret)
2327 GPII_ERR(gpii, gpii_chan->chid,
2328 "error stopping channel:%d\n", ret);
2329
2330 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2331 if (ret)
2332 GPII_ERR(gpii, gpii_chan->chid,
2333 "error resetting channel:%d\n", ret);
2334
2335 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2336 }
2337
2338 /* free all allocated memory */
2339 gpi_free_ring(&gpii_chan->ch_ring, gpii);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002340 vchan_free_chan_resources(&gpii_chan->vc);
2341
2342 write_lock_irq(&gpii->pm_lock);
2343 gpii_chan->pm_state = DISABLE_STATE;
2344 write_unlock_irq(&gpii->pm_lock);
2345
2346 /* if other rings are still active exit */
2347 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2348 if (gpii->gpii_chan[i].ch_ring.configured)
2349 goto exit_free;
2350
2351 GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
2352
2353 /* deallocate EV Ring */
2354 cur_state = gpii->pm_state;
2355 write_lock_irq(&gpii->pm_lock);
2356 gpii->pm_state = PREPARE_TERMINATE;
2357 write_unlock_irq(&gpii->pm_lock);
2358
2359 /* wait for threads to complete out */
2360 tasklet_kill(&gpii->ev_task);
2361
2362 /* send command to de-allocate the event ring */
2363 if (cur_state == ACTIVE_STATE)
2364 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2365
2366 gpi_free_ring(&gpii->ev_ring, gpii);
2367
2368 /* disable interrupts */
2369 if (cur_state == ACTIVE_STATE)
2370 gpi_disable_interrupts(gpii);
2371
2372 /* set final state to disable */
2373 write_lock_irq(&gpii->pm_lock);
2374 gpii->pm_state = DISABLE_STATE;
2375 write_unlock_irq(&gpii->pm_lock);
2376
2377exit_free:
2378 mutex_unlock(&gpii->ctrl_lock);
2379}
2380
2381/* allocate channel resources */
2382static int gpi_alloc_chan_resources(struct dma_chan *chan)
2383{
2384 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2385 struct gpii *gpii = gpii_chan->gpii;
2386 int ret;
2387
2388 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2389
2390 mutex_lock(&gpii->ctrl_lock);
2391
2392 /* allocate memory for transfer ring */
2393 ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
Sujeev Dias8fc26002017-11-29 20:51:40 -08002394 sizeof(struct msm_gpi_tre), gpii);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002395 if (ret) {
2396 GPII_ERR(gpii, gpii_chan->chid,
2397 "error allocating xfer ring, ret:%d\n", ret);
2398 goto xfer_alloc_err;
2399 }
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002400 mutex_unlock(&gpii->ctrl_lock);
2401
2402 return 0;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002403xfer_alloc_err:
2404 mutex_unlock(&gpii->ctrl_lock);
2405
2406 return ret;
2407}
2408
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002409static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
2410{
2411 int gpii;
2412 struct gpii_chan *tx_chan, *rx_chan;
2413
2414 /* check if same seid is already configured for another chid */
2415 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2416 if (!((1 << gpii) & gpi_dev->gpii_mask))
2417 continue;
2418
2419 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2420 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2421
2422 if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
2423 return gpii;
2424 if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
2425 return gpii;
2426 }
2427
2428 /* no channels configured with same seid, return next avail gpii */
2429 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2430 if (!((1 << gpii) & gpi_dev->gpii_mask))
2431 continue;
2432
2433 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2434 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2435
2436 /* check if gpii is configured */
2437 if (tx_chan->vc.chan.client_count ||
2438 rx_chan->vc.chan.client_count)
2439 continue;
2440
2441 /* found a free gpii */
2442 return gpii;
2443 }
2444
2445 /* no gpii instance available to use */
2446 return -EIO;
2447}
2448
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002449/* gpi_of_dma_xlate: open client requested channel */
2450static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2451 struct of_dma *of_dma)
2452{
2453 struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002454 u32 seid, chid;
2455 int gpii;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002456 struct gpii_chan *gpii_chan;
2457
2458 if (args->args_count < REQ_OF_DMA_ARGS) {
2459 GPI_ERR(gpi_dev,
2460 "gpii require minimum 6 args, client passed:%d args\n",
2461 args->args_count);
2462 return NULL;
2463 }
2464
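 /*
  * dma cells used here: [0] channel id, [1] serial engine id,
  * [2] protocol, [3] number of TREs, [4] priority
  */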
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002465 chid = args->args[0];
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002466 if (chid >= MAX_CHANNELS_PER_GPII) {
2467 GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
2468 return NULL;
2469 }
2470
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002471 seid = args->args[1];
2472
2473 /* find next available gpii to use */
2474 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2475 if (gpii < 0) {
2476 GPI_ERR(gpi_dev, "no available gpii instances\n");
2477 return NULL;
2478 }
2479
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002480 gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002481 if (gpii_chan->vc.chan.client_count) {
2482 GPI_ERR(gpi_dev, "gpii:%d chid:%d seid:%d already configured\n",
2483 gpii, chid, gpii_chan->seid);
2484 return NULL;
2485 }
2486
2487 /* get ring size, protocol, se_id, and priority */
2488 gpii_chan->seid = seid;
2489 gpii_chan->protocol = args->args[2];
2490 gpii_chan->req_tres = args->args[3];
2491 gpii_chan->priority = args->args[4];
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002492
2493 GPI_LOG(gpi_dev,
2494 "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
2495 gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
2496 gpii_chan->protocol);
2497
2498 return dma_get_slave_channel(&gpii_chan->vc.chan);
2499}
2500
2501/* gpi_setup_debug - setup debug capabilities */
2502static void gpi_setup_debug(struct gpi_dev *gpi_dev)
2503{
2504 char node_name[GPI_LABEL_SIZE];
2505 const umode_t mode = 0600;
2506 int i;
2507
2508 snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
2509 (u64)gpi_dev->res->start);
2510
2511 gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2512 node_name, 0);
2513 gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2514 if (!IS_ERR_OR_NULL(pdentry)) {
2515 snprintf(node_name, sizeof(node_name), "%llx",
2516 (u64)gpi_dev->res->start);
2517 gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
2518 if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
2519 debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
2520 &gpi_dev->ipc_log_lvl);
2521 debugfs_create_u32("klog_lvl", mode,
2522 gpi_dev->dentry, &gpi_dev->klog_lvl);
2523 }
2524 }
2525
2526 for (i = 0; i < gpi_dev->max_gpii; i++) {
2527 struct gpii *gpii;
2528
2529 if (!((1 << i) & gpi_dev->gpii_mask))
2530 continue;
2531
2532 gpii = &gpi_dev->gpiis[i];
2533 snprintf(gpii->label, sizeof(gpii->label),
2534 "%s%llx_gpii%d",
2535 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
2536 gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2537 gpii->label, 0);
2538 gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2539 gpii->klog_lvl = DEFAULT_KLOG_LVL;
2540
2541 if (IS_ERR_OR_NULL(gpi_dev->dentry))
2542 continue;
2543
2544 snprintf(node_name, sizeof(node_name), "gpii%d", i);
2545 gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
2546 if (IS_ERR_OR_NULL(gpii->dentry))
2547 continue;
2548
2549 debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
2550 &gpii->ipc_log_lvl);
2551 debugfs_create_u32("klog_lvl", mode, gpii->dentry,
2552 &gpii->klog_lvl);
2553 }
2554}
2555
Sujeev Dias69484212017-08-31 10:06:53 -07002556static struct dma_iommu_mapping *gpi_create_mapping(struct gpi_dev *gpi_dev)
2557{
2558 dma_addr_t base;
2559 size_t size;
2560
2561 /*
2562 * If S1_BYPASS is enabled the iommu address space is not used, but the
2563 * framework still requires clients to create a mapping before attaching.
2564 * So use the smallest size the iommu framework accepts.
2565 */
2566 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2567 base = 0;
2568 size = PAGE_SIZE;
2569 } else {
2570 base = gpi_dev->iova_base;
2571 size = gpi_dev->iova_size;
2572 }
2573
Siva Kumar Akkireddi15fb3942018-08-02 13:33:35 +05302574 GPI_LOG(gpi_dev, "Creating iommu mapping of base:%pad size:%zx\n",
2575 &base, size);
Sujeev Dias69484212017-08-31 10:06:53 -07002576
2577 return arm_iommu_create_mapping(&platform_bus_type, base, size);
2578}
2579
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002580static int gpi_smmu_init(struct gpi_dev *gpi_dev)
2581{
Sujeev Dias69484212017-08-31 10:06:53 -07002582 struct dma_iommu_mapping *mapping = NULL;
2583 int ret;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002584
Sujeev Dias69484212017-08-31 10:06:53 -07002585 if (gpi_dev->smmu_cfg) {
2586
2587 /* create mapping table */
2588 mapping = gpi_create_mapping(gpi_dev);
2589 if (IS_ERR(mapping)) {
2590 GPI_ERR(gpi_dev,
2591 "Failed to create iommu mapping, ret:%ld\n",
2592 PTR_ERR(mapping));
2593 return PTR_ERR(mapping);
2594 }
2595
2596 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2597 int s1_bypass = 1;
2598
2599 ret = iommu_domain_set_attr(mapping->domain,
2600 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
2601 if (ret) {
2602 GPI_ERR(gpi_dev,
2603 "Failed to set attr S1_BYPASS, ret:%d\n",
2604 ret);
2605 goto release_mapping;
2606 }
2607 }
2608
2609 if (gpi_dev->smmu_cfg & GPI_SMMU_FAST) {
2610 int fast = 1;
2611
2612 ret = iommu_domain_set_attr(mapping->domain,
2613 DOMAIN_ATTR_FAST, &fast);
2614 if (ret) {
2615 GPI_ERR(gpi_dev,
2616 "Failed to set attr FAST, ret:%d\n",
2617 ret);
2618 goto release_mapping;
2619 }
2620 }
2621
2622 if (gpi_dev->smmu_cfg & GPI_SMMU_ATOMIC) {
2623 int atomic = 1;
2624
2625 ret = iommu_domain_set_attr(mapping->domain,
2626 DOMAIN_ATTR_ATOMIC, &atomic);
2627 if (ret) {
2628 GPI_ERR(gpi_dev,
2629 "Failed to set attr ATOMIC, ret:%d\n",
2630 ret);
2631 goto release_mapping;
2632 }
2633 }
2634
2635 ret = arm_iommu_attach_device(gpi_dev->dev, mapping);
2636 if (ret) {
2637 GPI_ERR(gpi_dev,
2638 "Failed with iommu_attach, ret:%d\n", ret);
2639 goto release_mapping;
2640 }
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002641 }
2642
Siva Kumar Akkireddif14ef342018-07-20 17:17:55 +05302643 GPI_LOG(gpi_dev, "Setting dma mask to 64\n");
2644 ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002645 if (ret) {
Siva Kumar Akkireddif14ef342018-07-20 17:17:55 +05302646 GPI_ERR(gpi_dev, "Error setting dma_mask to 64, ret:%d\n", ret);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002647 goto error_set_mask;
2648 }
2649
2650 return ret;
2651
2652error_set_mask:
Sujeev Dias69484212017-08-31 10:06:53 -07002653 if (gpi_dev->smmu_cfg)
2654 arm_iommu_detach_device(gpi_dev->dev);
2655release_mapping:
2656 if (mapping)
2657 arm_iommu_release_mapping(mapping);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002658 return ret;
2659}
2660
2661static int gpi_probe(struct platform_device *pdev)
2662{
2663 struct gpi_dev *gpi_dev;
2664 int ret, i;
2665
2666 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2667 if (!gpi_dev)
2668 return -ENOMEM;
2669
2670 gpi_dev->dev = &pdev->dev;
2671 gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
2672 gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2673 "gpi-top");
2674 if (!gpi_dev->res) {
2675 GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
2676 return -EINVAL;
2677 }
2678 gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
2679 resource_size(gpi_dev->res));
2680 if (!gpi_dev->regs) {
2681 GPI_ERR(gpi_dev, "IO remap failed\n");
2682 return -EFAULT;
2683 }
2684
2685 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
2686 &gpi_dev->max_gpii);
2687 if (ret) {
2688 GPI_ERR(gpi_dev, "missing 'max-no-gpii' DT node\n");
2689 return ret;
2690 }
2691
2692 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
2693 &gpi_dev->gpii_mask);
2694 if (ret) {
2695 GPI_ERR(gpi_dev, "missing 'gpii-mask' DT node\n");
2696 return ret;
2697 }
2698
2699 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
2700 &gpi_dev->ev_factor);
2701 if (ret) {
2702 GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
2703 return ret;
2704 }
2705
Sujeev Dias69484212017-08-31 10:06:53 -07002706 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,smmu-cfg",
2707 &gpi_dev->smmu_cfg);
2708 if (ret) {
2709 GPI_ERR(gpi_dev, "missing 'qcom,smmu-cfg' DT node\n");
2710 return ret;
2711 }
2712 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2713 u64 iova_range[2];
2714
2715 ret = of_property_count_elems_of_size(gpi_dev->dev->of_node,
2716 "qcom,iova-range",
2717 sizeof(iova_range));
2718 if (ret != 1) {
2719 GPI_ERR(gpi_dev,
2720 "missing or incorrect 'qcom,iova-range' DT node ret:%d\n",
2721 ret);
2722 }
2723
2724 ret = of_property_read_u64_array(gpi_dev->dev->of_node,
2725 "qcom,iova-range", iova_range,
2726 sizeof(iova_range) / sizeof(u64));
2727 if (ret) {
2728 GPI_ERR(gpi_dev,
2729 "could not read DT prop 'qcom,iova-range\n");
2730 return ret;
2731 }
2732 gpi_dev->iova_base = iova_range[0];
2733 gpi_dev->iova_size = iova_range[1];
2734 }
2735
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002736 ret = gpi_smmu_init(gpi_dev);
2737 if (ret) {
2738 GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
2739 return ret;
2740 }
2741
2742 gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
2743 sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
2744 GFP_KERNEL);
2745 if (!gpi_dev->gpiis)
2746 return -ENOMEM;
2747
2748
2749 /* setup all the supported gpii */
2750 INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2751 for (i = 0; i < gpi_dev->max_gpii; i++) {
2752 struct gpii *gpii = &gpi_dev->gpiis[i];
2753 int chan;
2754
2755 if (!((1 << i) & gpi_dev->gpii_mask))
2756 continue;
2757
2758 /* set up ev cntxt register map */
2759 gpii->ev_cntxt_base_reg = gpi_dev->regs +
2760 GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2761 gpii->ev_cntxt_db_reg = gpi_dev->regs +
2762 GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2763 gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
2764 CNTXT_2_RING_BASE_LSB;
2765 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
2766 CNTXT_4_RING_RP_LSB;
2767 gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
2768 CNTXT_6_RING_WP_LSB;
2769 gpii->ev_cmd_reg = gpi_dev->regs +
2770 GPI_GPII_n_EV_CH_CMD_OFFS(i);
2771 gpii->ieob_src_reg = gpi_dev->regs +
2772 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
2773 gpii->ieob_clr_reg = gpi_dev->regs +
2774 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2775
2776 /* set up irq */
2777 ret = platform_get_irq(pdev, i);
2778 if (ret < 0) {
2779 GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d",
2780 i, ret);
2781 return ret;
2782 }
2783 gpii->irq = ret;
2784
2785 /* set up channel specific register info */
2786 for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2787 struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
2788
2789 /* set up ch cntxt register map */
2790 gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
2791 GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2792 gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
2793 GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2794 gpii_chan->ch_ring_base_lsb_reg =
2795 gpii_chan->ch_cntxt_base_reg +
2796 CNTXT_2_RING_BASE_LSB;
2797 gpii_chan->ch_ring_rp_lsb_reg =
2798 gpii_chan->ch_cntxt_base_reg +
2799 CNTXT_4_RING_RP_LSB;
2800 gpii_chan->ch_ring_wp_lsb_reg =
2801 gpii_chan->ch_cntxt_base_reg +
2802 CNTXT_6_RING_WP_LSB;
2803 gpii_chan->ch_cmd_reg = gpi_dev->regs +
2804 GPI_GPII_n_CH_CMD_OFFS(i);
2805
2806 /* vchan setup */
2807 vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
2808 gpii_chan->vc.desc_free = gpi_desc_free;
2809 gpii_chan->chid = chan;
2810 gpii_chan->gpii = gpii;
2811 gpii_chan->dir = GPII_CHAN_DIR[chan];
2812 }
2813 mutex_init(&gpii->ctrl_lock);
2814 rwlock_init(&gpii->pm_lock);
2815 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2816 (unsigned long)gpii);
2817 init_completion(&gpii->cmd_completion);
2818 gpii->gpii_id = i;
2819 gpii->regs = gpi_dev->regs;
2820 gpii->gpi_dev = gpi_dev;
2821 atomic_set(&gpii->dbg_index, 0);
2822 }
2823
2824 platform_set_drvdata(pdev, gpi_dev);
2825
2826 /* clear and set capabilities */
2827 dma_cap_zero(gpi_dev->dma_device.cap_mask);
2828 dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2829
2830 /* configure dmaengine apis */
2831 gpi_dev->dma_device.directions =
2832 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2833 gpi_dev->dma_device.residue_granularity =
2834 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2835 gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2836 gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2837 gpi_dev->dma_device.device_alloc_chan_resources =
2838 gpi_alloc_chan_resources;
2839 gpi_dev->dma_device.device_free_chan_resources =
2840 gpi_free_chan_resources;
2841 gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2842 gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2843 gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2844 gpi_dev->dma_device.device_config = gpi_config;
2845 gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2846 gpi_dev->dma_device.dev = gpi_dev->dev;
2847 gpi_dev->dma_device.device_pause = gpi_pause;
2848 gpi_dev->dma_device.device_resume = gpi_resume;
2849
2850 /* register with dmaengine framework */
2851 ret = dma_async_device_register(&gpi_dev->dma_device);
2852 if (ret) {
2853 GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
2854 return ret;
2855 }
2856
2857 ret = of_dma_controller_register(gpi_dev->dev->of_node,
2858 gpi_of_dma_xlate, gpi_dev);
2859 if (ret) {
2860 GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
2861 return ret;
2862 }
2863
2864 /* setup debug capabilities */
2865 gpi_setup_debug(gpi_dev);
2866 GPI_LOG(gpi_dev, "probe success\n");
2867
2868 return ret;
2869}
2870
2871static const struct of_device_id gpi_of_match[] = {
2872 { .compatible = "qcom,gpi-dma" },
2873 {}
2874};
2875MODULE_DEVICE_TABLE(of, gpi_of_match);
2876
2877static struct platform_driver gpi_driver = {
2878 .probe = gpi_probe,
2879 .driver = {
2880 .name = GPI_DMA_DRV_NAME,
2881 .of_match_table = gpi_of_match,
2882 },
2883};
2884
2885static int __init gpi_init(void)
2886{
2887 pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
2888 return platform_driver_register(&gpi_driver);
2889}
2890module_init(gpi_init)
2891
2892MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2893MODULE_LICENSE("GPL v2");