 1/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <asm/dma-iommu.h>
14#include <linux/atomic.h>
15#include <linux/completion.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/io.h>
21#include <linux/iommu.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/ipc_logging.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_address.h>
29#include <linux/of_dma.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/scatterlist.h>
33#include <linux/sched_clock.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <asm/cacheflush.h>
37#include <linux/msm_gpi.h>
38#include "../dmaengine.h"
39#include "../virt-dma.h"
40#include "msm_gpi_mmio.h"
41
42/* global logging macros */
43#define GPI_LOG(gpi_dev, fmt, ...) do { \
44 if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
45 dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
46 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
47 ipc_log_string(gpi_dev->ilctxt, \
48 "%s: " fmt, __func__, ##__VA_ARGS__); \
49 } while (0)
50#define GPI_ERR(gpi_dev, fmt, ...) do { \
51 if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
52 dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
53 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
54 ipc_log_string(gpi_dev->ilctxt, \
55 "%s: " fmt, __func__, ##__VA_ARGS__); \
56 } while (0)
57
58/* gpii specific logging macros */
59#define GPII_REG(gpii, ch, fmt, ...) do { \
60 if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
61 pr_info("%s:%u:%s: " fmt, gpii->label, \
62 ch, __func__, ##__VA_ARGS__); \
63 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
64 ipc_log_string(gpii->ilctxt, \
65 "ch:%u %s: " fmt, ch, \
66 __func__, ##__VA_ARGS__); \
67 } while (0)
68#define GPII_VERB(gpii, ch, fmt, ...) do { \
69 if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
70 pr_info("%s:%u:%s: " fmt, gpii->label, \
71 ch, __func__, ##__VA_ARGS__); \
72 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
73 ipc_log_string(gpii->ilctxt, \
74 "ch:%u %s: " fmt, ch, \
75 __func__, ##__VA_ARGS__); \
76 } while (0)
77#define GPII_INFO(gpii, ch, fmt, ...) do { \
78 if (gpii->klog_lvl >= LOG_LVL_INFO) \
79 pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
80 __func__, ##__VA_ARGS__); \
81 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
82 ipc_log_string(gpii->ilctxt, \
83 "ch:%u %s: " fmt, ch, \
84 __func__, ##__VA_ARGS__); \
85 } while (0)
86#define GPII_ERR(gpii, ch, fmt, ...) do { \
87 if (gpii->klog_lvl >= LOG_LVL_ERROR) \
88 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
89 __func__, ##__VA_ARGS__); \
90 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
91 ipc_log_string(gpii->ilctxt, \
92 "ch:%u %s: " fmt, ch, \
93 __func__, ##__VA_ARGS__); \
94 } while (0)
95#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
96 if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
97 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
98 __func__, ##__VA_ARGS__); \
99 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
100 ipc_log_string(gpii->ilctxt, \
101 "ch:%u %s: " fmt, ch, \
102 __func__, ##__VA_ARGS__); \
103 } while (0)
104
105enum DEBUG_LOG_LVL {
106 LOG_LVL_MASK_ALL,
107 LOG_LVL_CRITICAL,
108 LOG_LVL_ERROR,
109 LOG_LVL_INFO,
110 LOG_LVL_VERBOSE,
111 LOG_LVL_REG_ACCESS,
112};
113
114enum EV_PRIORITY {
115 EV_PRIORITY_ISR,
116 EV_PRIORITY_TASKLET,
117};
118
119#define GPI_DMA_DRV_NAME "gpi_dma"
120#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
121#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
122#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
123#define IPC_LOG_PAGES (40)
124#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
125#else
126#define IPC_LOG_PAGES (2)
127#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
128#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
129#endif
130
131#define GPI_LABEL_SIZE (256)
132#define GPI_DBG_COMMON (99)
133#define MAX_CHANNELS_PER_GPII (2)
 134#define GPI_TX_CHAN (0)
 135#define GPI_RX_CHAN (1)
 136#define CMD_TIMEOUT_MS (50)
 137#define STATE_IGNORE (U32_MAX)
 138#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
 139
140struct __packed gpi_error_log_entry {
141 u32 routine : 4;
142 u32 type : 4;
143 u32 reserved0 : 4;
144 u32 code : 4;
145 u32 reserved1 : 3;
146 u32 chid : 5;
147 u32 reserved2 : 1;
148 u32 chtype : 1;
149 u32 ee : 1;
150};
151
152struct __packed xfer_compl_event {
153 u64 ptr;
154 u32 length : 24;
155 u8 code;
156 u16 status;
157 u8 type;
158 u8 chid;
159};
160
161struct __packed immediate_data_event {
162 u8 data_bytes[8];
163 u8 length : 4;
164 u8 resvd : 4;
165 u16 tre_index;
166 u8 code;
167 u16 status;
168 u8 type;
169 u8 chid;
170};
171
172struct __packed qup_notif_event {
173 u32 status;
174 u32 time;
175 u32 count :24;
176 u8 resvd;
177 u16 resvd1;
178 u8 type;
179 u8 chid;
180};
181
182struct __packed gpi_ere {
183 u32 dword[4];
184};
185
186enum GPI_EV_TYPE {
187 XFER_COMPLETE_EV_TYPE = 0x22,
188 IMMEDIATE_DATA_EV_TYPE = 0x30,
189 QUP_NOTIF_EV_TYPE = 0x31,
190 STALE_EV_TYPE = 0xFF,
191};
192
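/* hardware event ring element (ERE) formats; each layout packs into 16 bytes */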
193union __packed gpi_event {
194 struct __packed xfer_compl_event xfer_compl_event;
195 struct __packed immediate_data_event immediate_data_event;
196 struct __packed qup_notif_event qup_notif_event;
197 struct __packed gpi_ere gpi_ere;
198};
199
200enum gpii_irq_settings {
201 DEFAULT_IRQ_SETTINGS,
202 MASK_IEOB_SETTINGS,
203};
204
205enum gpi_ev_state {
206 DEFAULT_EV_CH_STATE = 0,
207 EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
208 EV_STATE_ALLOCATED,
209 MAX_EV_STATES
210};
211
212static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
213 [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
214 [EV_STATE_ALLOCATED] = "ALLOCATED",
215};
216
217#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
218 "INVALID" : gpi_ev_state_str[state])
219
220enum gpi_ch_state {
221 DEFAULT_CH_STATE = 0x0,
222 CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
223 CH_STATE_ALLOCATED = 0x1,
224 CH_STATE_STARTED = 0x2,
225 CH_STATE_STOPPED = 0x3,
226 CH_STATE_STOP_IN_PROC = 0x4,
227 CH_STATE_ERROR = 0xf,
228 MAX_CH_STATES
229};
230
231static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
232 [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
233 [CH_STATE_ALLOCATED] = "ALLOCATED",
234 [CH_STATE_STARTED] = "STARTED",
235 [CH_STATE_STOPPED] = "STOPPED",
236 [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
237 [CH_STATE_ERROR] = "ERROR",
238};
239
240#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
241 "INVALID" : gpi_ch_state_str[state])
242
243enum gpi_cmd {
244 GPI_CH_CMD_BEGIN,
245 GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
246 GPI_CH_CMD_START,
247 GPI_CH_CMD_STOP,
248 GPI_CH_CMD_RESET,
249 GPI_CH_CMD_DE_ALLOC,
250 GPI_CH_CMD_UART_SW_STALE,
251 GPI_CH_CMD_UART_RFR_READY,
252 GPI_CH_CMD_UART_RFR_NOT_READY,
253 GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
254 GPI_EV_CMD_BEGIN,
255 GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
256 GPI_EV_CMD_RESET,
257 GPI_EV_CMD_DEALLOC,
258 GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
259 GPI_MAX_CMD,
260};
261
262#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
263
264static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
265 [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
266 [GPI_CH_CMD_START] = "CH START",
267 [GPI_CH_CMD_STOP] = "CH STOP",
268 [GPI_CH_CMD_RESET] = "CH_RESET",
269 [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
270 [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
271 [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
272 [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
273 [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
274 [GPI_EV_CMD_RESET] = "EV RESET",
275 [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
276};
277
278#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
279 gpi_cmd_str[cmd])
280
281static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
282 [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
283 [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
284 [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
285 [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
286 [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
287 [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
288};
289
290#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
291 "INVALID" : gpi_cb_event_str[event])
292
293enum se_protocol {
294 SE_PROTOCOL_SPI = 1,
295 SE_PROTOCOL_UART = 2,
296 SE_PROTOCOL_I2C = 3,
297 SE_MAX_PROTOCOL
298};
299
300/*
301 * @DISABLE_STATE: no register access allowed
302 * @CONFIG_STATE: client has configured the channel
 303 * @PREPARE_HARDWARE: register access is allowed,
 304 * but events are not processed yet
 305 * @ACTIVE_STATE: channels are fully operational
 306 * @PREPARE_TERMINATE: graceful termination of channels,
 307 * register access is allowed
308 * @PAUSE_STATE: channels are active, but not processing any events
309 */
310enum gpi_pm_state {
311 DISABLE_STATE,
312 CONFIG_STATE,
313 PREPARE_HARDWARE,
314 ACTIVE_STATE,
315 PREPARE_TERMINATE,
316 PAUSE_STATE,
317 MAX_PM_STATE
318};
319
320#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
321
322static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
323 [DISABLE_STATE] = "DISABLE",
324 [CONFIG_STATE] = "CONFIG",
325 [PREPARE_HARDWARE] = "PREPARE HARDWARE",
326 [ACTIVE_STATE] = "ACTIVE",
327 [PREPARE_TERMINATE] = "PREPARE TERMINATE",
328 [PAUSE_STATE] = "PAUSE",
329};
330
331#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
332 "INVALID" : gpi_pm_state_str[state])
333
334static const struct {
335 enum gpi_cmd gpi_cmd;
336 u32 opcode;
337 u32 state;
338 u32 timeout_ms;
339} gpi_cmd_info[GPI_MAX_CMD] = {
340 {
341 GPI_CH_CMD_ALLOCATE,
342 GPI_GPII_n_CH_CMD_ALLOCATE,
343 CH_STATE_ALLOCATED,
344 CMD_TIMEOUT_MS,
345 },
346 {
347 GPI_CH_CMD_START,
348 GPI_GPII_n_CH_CMD_START,
349 CH_STATE_STARTED,
350 CMD_TIMEOUT_MS,
351 },
352 {
353 GPI_CH_CMD_STOP,
354 GPI_GPII_n_CH_CMD_STOP,
355 CH_STATE_STOPPED,
356 CMD_TIMEOUT_MS,
357 },
358 {
359 GPI_CH_CMD_RESET,
360 GPI_GPII_n_CH_CMD_RESET,
361 CH_STATE_ALLOCATED,
362 CMD_TIMEOUT_MS,
363 },
364 {
365 GPI_CH_CMD_DE_ALLOC,
366 GPI_GPII_n_CH_CMD_DE_ALLOC,
367 CH_STATE_NOT_ALLOCATED,
368 CMD_TIMEOUT_MS,
369 },
370 {
371 GPI_CH_CMD_UART_SW_STALE,
372 GPI_GPII_n_CH_CMD_UART_SW_STALE,
373 STATE_IGNORE,
374 CMD_TIMEOUT_MS,
375 },
376 {
377 GPI_CH_CMD_UART_RFR_READY,
378 GPI_GPII_n_CH_CMD_UART_RFR_READY,
379 STATE_IGNORE,
380 CMD_TIMEOUT_MS,
381 },
382 {
383 GPI_CH_CMD_UART_RFR_NOT_READY,
384 GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
385 STATE_IGNORE,
386 CMD_TIMEOUT_MS,
387 },
388 {
389 GPI_EV_CMD_ALLOCATE,
390 GPI_GPII_n_EV_CH_CMD_ALLOCATE,
391 EV_STATE_ALLOCATED,
392 CMD_TIMEOUT_MS,
393 },
394 {
395 GPI_EV_CMD_RESET,
396 GPI_GPII_n_EV_CH_CMD_RESET,
397 EV_STATE_ALLOCATED,
398 CMD_TIMEOUT_MS,
399 },
400 {
401 GPI_EV_CMD_DEALLOC,
402 GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
403 EV_STATE_NOT_ALLOCATED,
404 CMD_TIMEOUT_MS,
405 },
406};
407
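/*
 * Ring bookkeeping: @pre_aligned/@alloc_size describe the raw allocation,
 * @base/@phys_addr the aligned start of the ring, @rp/@wp the software
 * read/write pointers, and @len = @el_size * @elements.
 */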
408struct gpi_ring {
409 void *pre_aligned;
410 size_t alloc_size;
411 phys_addr_t phys_addr;
412 dma_addr_t dma_handle;
413 void *base;
414 void *wp;
415 void *rp;
416 u32 len;
417 u32 el_size;
418 u32 elements;
419 bool configured;
420};
421
422struct sg_tre {
423 void *ptr;
424 void *wp; /* store chan wp for debugging */
425};
426
427struct gpi_dbg_log {
428 void *addr;
429 u64 time;
430 u32 val;
431 bool read;
432};
433
434struct gpi_dev {
435 struct dma_device dma_device;
436 struct device *dev;
437 struct resource *res;
438 void __iomem *regs;
439 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
440 u32 gpii_mask; /* gpii instances available for apps */
441 u32 ev_factor; /* ev ring length factor */
442 struct gpii *gpiis;
443 void *ilctxt;
444 u32 ipc_log_lvl;
445 u32 klog_lvl;
446 struct dentry *dentry;
447};
448
449struct gpii_chan {
450 struct virt_dma_chan vc;
451 u32 chid;
452 u32 seid;
453 enum se_protocol protocol;
454 enum EV_PRIORITY priority; /* comes from clients DT node */
455 struct gpii *gpii;
456 enum gpi_ch_state ch_state;
457 enum gpi_pm_state pm_state;
458 void __iomem *ch_cntxt_base_reg;
459 void __iomem *ch_cntxt_db_reg;
460 void __iomem *ch_ring_base_lsb_reg,
461 *ch_ring_rp_lsb_reg,
462 *ch_ring_wp_lsb_reg;
463 void __iomem *ch_cmd_reg;
464 u32 req_tres; /* # of tre's client requested */
465 u32 dir;
466 struct gpi_ring ch_ring;
467 struct gpi_ring sg_ring; /* points to client scatterlist */
468 struct gpi_client_info client_info;
469};
470
471struct gpii {
472 u32 gpii_id;
473 struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
474 struct gpi_dev *gpi_dev;
475 enum EV_PRIORITY ev_priority;
476 enum se_protocol protocol;
477 int irq;
478 void __iomem *regs; /* points to gpi top */
479 void __iomem *ev_cntxt_base_reg;
480 void __iomem *ev_cntxt_db_reg;
481 void __iomem *ev_ring_base_lsb_reg,
482 *ev_ring_rp_lsb_reg,
483 *ev_ring_wp_lsb_reg;
484 void __iomem *ev_cmd_reg;
485 void __iomem *ieob_src_reg;
486 void __iomem *ieob_clr_reg;
487 struct mutex ctrl_lock;
488 enum gpi_ev_state ev_state;
489 bool configured_irq;
490 enum gpi_pm_state pm_state;
491 rwlock_t pm_lock;
492 struct gpi_ring ev_ring;
493 struct tasklet_struct ev_task; /* event processing tasklet */
494 struct completion cmd_completion;
495 enum gpi_cmd gpi_cmd;
496 u32 cntxt_type_irq_msk;
497 void *ilctxt;
498 u32 ipc_log_lvl;
499 u32 klog_lvl;
500 struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
501 atomic_t dbg_index;
502 char label[GPI_LABEL_SIZE];
503 struct dentry *dentry;
504};
505
506struct gpi_desc {
507 struct virt_dma_desc vd;
508 void *wp; /* points to TRE last queued during issue_pending */
509 struct sg_tre *sg_tre; /* points to last scatterlist */
510 void *db; /* DB register to program */
511 struct gpii_chan *gpii_chan;
512};
513
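/* chid 0 (GPI_TX_CHAN) is the outbound channel, chid 1 (GPI_RX_CHAN) the inbound one */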
514const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
515 GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
516};
517
518struct dentry *pdentry;
519static irqreturn_t gpi_handle_irq(int irq, void *data);
520static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
521static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
522static void gpi_process_events(struct gpii *gpii);
523
524static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
525{
526 return container_of(dma_chan, struct gpii_chan, vc.chan);
527}
528
529static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
530{
531 return container_of(vd, struct gpi_desc, vd);
532}
533
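/* translate an address inside the ring between its virtual and physical views */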
534static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
535 void *addr)
536{
537 return ring->phys_addr + (addr - ring->base);
538}
539
540static inline void *to_virtual(const struct gpi_ring *const ring,
541 phys_addr_t addr)
542{
543 return ring->base + (addr - ring->phys_addr);
544}
545
546#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
547static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
548{
549 u64 time = sched_clock();
550 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
551 u32 val;
552
553 val = readl_relaxed(addr);
554 index &= (GPI_DBG_LOG_SIZE - 1);
555 (gpii->dbg_log + index)->addr = addr;
556 (gpii->dbg_log + index)->time = time;
557 (gpii->dbg_log + index)->val = val;
558 (gpii->dbg_log + index)->read = true;
559 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
560 addr - gpii->regs, val);
561 return val;
562}
563static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
564{
565 u64 time = sched_clock();
566 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
567
568 index &= (GPI_DBG_LOG_SIZE - 1);
569 (gpii->dbg_log + index)->addr = addr;
570 (gpii->dbg_log + index)->time = time;
571 (gpii->dbg_log + index)->val = val;
572 (gpii->dbg_log + index)->read = false;
573
574 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
575 addr - gpii->regs, val);
576 writel_relaxed(val, addr);
577}
578#else
579static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
580{
581 u32 val = readl_relaxed(addr);
582
583 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
584 addr - gpii->regs, val);
585 return val;
586}
587static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
588{
589 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
590 addr - gpii->regs, val);
591 writel_relaxed(val, addr);
592}
593#endif
594
595/* gpi_write_reg_field - write to specific bit field */
596static inline void gpi_write_reg_field(struct gpii *gpii,
597 void __iomem *addr,
598 u32 mask,
599 u32 shift,
600 u32 val)
601{
602 u32 tmp = gpi_read_reg(gpii, addr);
603
604 tmp &= ~mask;
605 val = tmp | ((val << shift) & mask);
606 gpi_write_reg(gpii, addr, val);
607}
608
609static void gpi_disable_interrupts(struct gpii *gpii)
610{
611 struct {
612 u32 offset;
613 u32 mask;
614 u32 shift;
615 u32 val;
616 } default_reg[] = {
617 {
618 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
619 (gpii->gpii_id),
620 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
621 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
622 0,
623 },
624 {
625 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
626 (gpii->gpii_id),
627 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
628 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
629 0,
630 },
631 {
632 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
633 (gpii->gpii_id),
634 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
635 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
636 0,
637 },
638 {
639 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
640 (gpii->gpii_id),
641 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
642 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
643 0,
644 },
645 {
646 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
647 (gpii->gpii_id),
648 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
649 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
650 0,
651 },
652 {
653 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
654 (gpii->gpii_id),
655 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
656 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
657 0,
658 },
659 {
660 GPI_GPII_n_CNTXT_INTSET_OFFS
661 (gpii->gpii_id),
662 GPI_GPII_n_CNTXT_INTSET_BMSK,
663 GPI_GPII_n_CNTXT_INTSET_SHFT,
664 0,
665 },
666 { 0 },
667 };
668 int i;
669
670 for (i = 0; default_reg[i].offset; i++)
671 gpi_write_reg_field(gpii, gpii->regs +
672 default_reg[i].offset,
673 default_reg[i].mask,
674 default_reg[i].shift,
675 default_reg[i].val);
676 gpii->cntxt_type_irq_msk = 0;
677 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
678 gpii->configured_irq = false;
679}
680
681/* configure and enable interrupts */
682static int gpi_config_interrupts(struct gpii *gpii,
683 enum gpii_irq_settings settings,
684 bool mask)
685{
686 int ret;
687 int i;
688 const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
689 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
690 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
691 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
692 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
693 struct {
694 u32 offset;
695 u32 mask;
696 u32 shift;
697 u32 val;
698 } default_reg[] = {
699 {
700 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
701 (gpii->gpii_id),
702 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
703 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
704 def_type,
705 },
706 {
707 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
708 (gpii->gpii_id),
709 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
710 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
711 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
712 },
713 {
714 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
715 (gpii->gpii_id),
716 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
717 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
718 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
719 },
720 {
721 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
722 (gpii->gpii_id),
723 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
724 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
725 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
726 },
727 {
728 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
729 (gpii->gpii_id),
730 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
731 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
732 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
733 },
734 {
735 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
736 (gpii->gpii_id),
737 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
738 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
739 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
740 },
741 {
742 GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
743 (gpii->gpii_id),
744 U32_MAX,
745 0,
746 0x0,
747 },
748 {
749 GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
750 (gpii->gpii_id),
751 U32_MAX,
752 0,
753 0x0,
754 },
755 {
756 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
757 (gpii->gpii_id),
758 U32_MAX,
759 0,
760 0x0,
761 },
762 {
763 GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
764 (gpii->gpii_id),
765 U32_MAX,
766 0,
767 0x0,
768 },
769 {
770 GPI_GPII_n_CNTXT_INTSET_OFFS
771 (gpii->gpii_id),
772 GPI_GPII_n_CNTXT_INTSET_BMSK,
773 GPI_GPII_n_CNTXT_INTSET_SHFT,
774 0x01,
775 },
776 {
777 GPI_GPII_n_ERROR_LOG_OFFS
778 (gpii->gpii_id),
779 U32_MAX,
780 0,
781 0x00,
782 },
783 { 0 },
784 };
785
786 GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
787 (gpii->configured_irq) ? 'F' : 'T',
788 (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
789 (mask) ? 'T' : 'F');
790
791 if (gpii->configured_irq == false) {
792 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
793 gpi_handle_irq, IRQF_TRIGGER_HIGH,
794 gpii->label, gpii);
795 if (ret < 0) {
796 GPII_CRITIC(gpii, GPI_DBG_COMMON,
797 "error request irq:%d ret:%d\n",
798 gpii->irq, ret);
799 return ret;
800 }
801 }
802
803 if (settings == MASK_IEOB_SETTINGS) {
804 /*
805 * GPII only uses one EV ring per gpii so we can globally
806 * enable/disable IEOB interrupt
807 */
808 if (mask)
809 gpii->cntxt_type_irq_msk |=
810 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
811 else
812 gpii->cntxt_type_irq_msk &=
813 ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
814 gpi_write_reg_field(gpii, gpii->regs +
815 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
816 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
817 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
818 gpii->cntxt_type_irq_msk);
819 } else {
820 for (i = 0; default_reg[i].offset; i++)
821 gpi_write_reg_field(gpii, gpii->regs +
822 default_reg[i].offset,
823 default_reg[i].mask,
824 default_reg[i].shift,
825 default_reg[i].val);
826 gpii->cntxt_type_irq_msk = def_type;
 827 }
828
829 gpii->configured_irq = true;
830
831 return 0;
832}
833
834/* Sends gpii event or channel command */
835static int gpi_send_cmd(struct gpii *gpii,
836 struct gpii_chan *gpii_chan,
837 enum gpi_cmd gpi_cmd)
838{
839 u32 chid = MAX_CHANNELS_PER_GPII;
840 u32 cmd;
841 unsigned long timeout;
842 void __iomem *cmd_reg;
843
844 if (gpi_cmd >= GPI_MAX_CMD)
845 return -EINVAL;
846 if (IS_CHAN_CMD(gpi_cmd))
847 chid = gpii_chan->chid;
848
849 GPII_INFO(gpii, chid,
850 "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
851
852 /* send opcode and wait for completion */
853 reinit_completion(&gpii->cmd_completion);
854 gpii->gpi_cmd = gpi_cmd;
855
856 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
857 gpii->ev_cmd_reg;
858 cmd = IS_CHAN_CMD(gpi_cmd) ?
859 GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
860 GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
861 gpi_write_reg(gpii, cmd_reg, cmd);
862 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
863 msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
864
865 if (!timeout) {
866 GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
867 TO_GPI_CMD_STR(gpi_cmd));
868 return -EIO;
869 }
870
 871 /* if the cmd changes state, confirm the new channel/ev state is correct */
872 if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
873 return 0;
874 if (IS_CHAN_CMD(gpi_cmd) &&
875 gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
876 return 0;
877 if (!IS_CHAN_CMD(gpi_cmd) &&
878 gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
879 return 0;
880
881 return -EIO;
882}
883
884/* program transfer ring DB register */
885static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
886 struct gpi_ring *ring,
887 void *wp)
888{
889 struct gpii *gpii = gpii_chan->gpii;
890 phys_addr_t p_wp;
891
892 p_wp = to_physical(ring, wp);
893 gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
894}
895
896/* program event ring DB register */
897static inline void gpi_write_ev_db(struct gpii *gpii,
898 struct gpi_ring *ring,
899 void *wp)
900{
901 phys_addr_t p_wp;
902
903 p_wp = ring->phys_addr + (wp - ring->base);
904 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
905}
906
907/* notify client with generic event */
908static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
909 enum msm_gpi_cb_event event,
910 u64 status)
911{
912 struct gpii *gpii = gpii_chan->gpii;
913 struct gpi_client_info *client_info = &gpii_chan->client_info;
914 struct msm_gpi_cb msm_gpi_cb = {0};
915
916 GPII_ERR(gpii, gpii_chan->chid,
917 "notifying event:%s with status:%llu\n",
918 TO_GPI_CB_EVENT_STR(event), status);
919
920 msm_gpi_cb.cb_event = event;
921 msm_gpi_cb.status = status;
922 msm_gpi_cb.timestamp = sched_clock();
923 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
924 client_info->cb_param);
925}
926
927/* process transfer completion interrupt */
928static void gpi_process_ieob(struct gpii *gpii)
929{
930 u32 ieob_irq;
931
932 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
933 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
934 GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
935
936 /* process events based on priority */
937 if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
938 GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
939 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
940 tasklet_schedule(&gpii->ev_task);
941 } else {
942 GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
943 gpi_process_events(gpii);
944 }
945}
946
947/* process channel control interrupt */
948static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
949{
950 u32 gpii_id = gpii->gpii_id;
951 u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
952 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
953 u32 chid;
954 struct gpii_chan *gpii_chan;
955 u32 state;
956
957 /* clear the status */
958 offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
959 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
960
961 for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
962 if (!(BIT(chid) & ch_irq))
963 continue;
964
965 gpii_chan = &gpii->gpii_chan[chid];
966 GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
967 state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
968 CNTXT_0_CONFIG);
969 state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
970 GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
971
972 /*
 973 * The CH_CMD_DE_ALLOC cmd always succeeds, but it does not
 974 * change the hardware state, so overwrite the software state
 975 * with the default state.
976 */
977 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
978 state = DEFAULT_CH_STATE;
979 gpii_chan->ch_state = state;
980 GPII_VERB(gpii, chid, "setting channel to state:%s\n",
981 TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
982
983 /*
 984 * Complete the command unless ch_state is STOP_IN_PROC.
 985 * Stop-in-process is a transitional state; wait for the
 986 * stop interrupt before notifying.
987 */
988 if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
989 complete_all(&gpii->cmd_completion);
990
991 /* notifying clients if in error state */
992 if (gpii_chan->ch_state == CH_STATE_ERROR)
993 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
994 __LINE__);
995 }
996}
997
998/* processing gpi level error interrupts */
999static void gpi_process_glob_err_irq(struct gpii *gpii)
1000{
1001 u32 gpii_id = gpii->gpii_id;
1002 u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
1003 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
1004 u32 error_log;
1005 u32 chid;
1006 struct gpii_chan *gpii_chan;
1007 struct gpi_client_info *client_info;
1008 struct msm_gpi_cb msm_gpi_cb;
1009 struct gpi_error_log_entry *log_entry =
1010 (struct gpi_error_log_entry *)&error_log;
1011
1012 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
1013 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
1014
1015 /* only error interrupt should be set */
1016 if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
1017 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
1018 irq_stts);
1019 goto error_irq;
1020 }
1021
1022 offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
1023 error_log = gpi_read_reg(gpii, gpii->regs + offset);
1024 gpi_write_reg(gpii, gpii->regs + offset, 0);
1025
1026 /* get channel info */
1027 chid = ((struct gpi_error_log_entry *)&error_log)->chid;
1028 if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
1029 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
1030 chid);
1031 goto error_irq;
1032 }
1033
1034 gpii_chan = &gpii->gpii_chan[chid];
1035 client_info = &gpii_chan->client_info;
1036
1037 /* notify client with error log */
1038 msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
1039 msm_gpi_cb.error_log.routine = log_entry->routine;
1040 msm_gpi_cb.error_log.type = log_entry->type;
1041 msm_gpi_cb.error_log.error_code = log_entry->code;
1042 GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
1043 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1044 GPII_ERR(gpii, gpii_chan->chid,
1045 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
1046 log_entry->ee, log_entry->chtype,
1047 msm_gpi_cb.error_log.routine,
1048 msm_gpi_cb.error_log.type,
1049 msm_gpi_cb.error_log.error_code);
1050 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1051 client_info->cb_param);
1052
1053 return;
1054
1055error_irq:
1056 for (chid = 0, gpii_chan = gpii->gpii_chan;
1057 chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
1058 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
1059 irq_stts);
1060}
1061
1062/* gpii interrupt handler */
1063static irqreturn_t gpi_handle_irq(int irq, void *data)
1064{
1065 struct gpii *gpii = data;
1066 u32 type;
1067 unsigned long flags;
1068 u32 offset;
1069 u32 gpii_id = gpii->gpii_id;
1070
1071 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1072
1073 read_lock_irqsave(&gpii->pm_lock, flags);
1074
1075 /*
 1076 * The states are out of sync if an interrupt arrives while the
 1077 * software state does not allow register access; bail out.
1078 */
1079 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1080 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1081 "receive interrupt while in %s state\n",
1082 TO_GPI_PM_STR(gpii->pm_state));
1083 goto exit_irq;
1084 }
1085
1086 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1087 type = gpi_read_reg(gpii, gpii->regs + offset);
1088
1089 do {
1090 GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
1091 type);
1092 /* global gpii error */
1093 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
1094 GPII_ERR(gpii, GPI_DBG_COMMON,
1095 "processing global error irq\n");
1096 gpi_process_glob_err_irq(gpii);
1097 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
1098 }
1099
1100 /* event control irq */
1101 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
1102 u32 ev_state;
1103 u32 ev_ch_irq;
1104
1105 GPII_INFO(gpii, GPI_DBG_COMMON,
1106 "processing EV CTRL interrupt\n");
1107 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
1108 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
1109
1110 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
1111 (gpii_id);
1112 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
1113 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
1114 CNTXT_0_CONFIG);
1115 ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
1116 ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
1117
1118 /*
 1119 * The EV_CMD_DEALLOC cmd always succeeds, but it does
 1120 * not change the hardware state, so overwrite the
 1121 * software state with the default state.
1122 */
1123 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
1124 ev_state = DEFAULT_EV_CH_STATE;
1125
1126 gpii->ev_state = ev_state;
1127 GPII_INFO(gpii, GPI_DBG_COMMON,
1128 "setting EV state to %s\n",
1129 TO_GPI_EV_STATE_STR(gpii->ev_state));
1130 complete_all(&gpii->cmd_completion);
1131 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
1132 }
1133
1134 /* channel control irq */
1135 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
1136 GPII_INFO(gpii, GPI_DBG_COMMON,
1137 "process CH CTRL interrupts\n");
1138 gpi_process_ch_ctrl_irq(gpii);
1139 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
1140 }
1141
1142 /* transfer complete interrupt */
1143 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
1144 GPII_VERB(gpii, GPI_DBG_COMMON,
1145 "process IEOB interrupts\n");
1146 gpi_process_ieob(gpii);
1147 type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
1148 }
1149
1150 if (type) {
1151 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1152 "Unhandled interrupt status:0x%x\n", type);
1153 goto exit_irq;
1154 }
1155 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1156 type = gpi_read_reg(gpii, gpii->regs + offset);
1157 } while (type);
1158
1159exit_irq:
1160 read_unlock_irqrestore(&gpii->pm_lock, flags);
1161 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1162
1163 return IRQ_HANDLED;
1164}
1165
1166/* process qup notification events */
1167static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
1168 struct qup_notif_event *notif_event)
1169{
1170 struct gpii *gpii = gpii_chan->gpii;
1171 struct gpi_client_info *client_info = &gpii_chan->client_info;
1172 struct msm_gpi_cb msm_gpi_cb;
1173
1174 GPII_VERB(gpii, gpii_chan->chid,
1175 "status:0x%x time:0x%x count:0x%x\n",
1176 notif_event->status, notif_event->time, notif_event->count);
1177
1178 msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
1179 msm_gpi_cb.status = notif_event->status;
1180 msm_gpi_cb.timestamp = notif_event->time;
1181 msm_gpi_cb.count = notif_event->count;
1182 GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
1183 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1184 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1185 client_info->cb_param);
1186}
1187
1188/* process DMA Immediate completion data events */
1189static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
1190 struct immediate_data_event *imed_event)
1191{
1192 struct gpii *gpii = gpii_chan->gpii;
1193 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1194 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1195 struct virt_dma_desc *vd;
1196 struct gpi_desc *gpi_desc;
1197 struct msm_gpi_tre *client_tre;
1198 void *sg_tre;
1199 void *tre = ch_ring->base +
1200 (ch_ring->el_size * imed_event->tre_index);
1201 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1202
1203 /*
 1204 * If the channel is not active, don't process the event;
 1205 * instead let the client know a pending event is available.
1206 */
1207 if (gpii_chan->pm_state != ACTIVE_STATE) {
1208 GPII_ERR(gpii, gpii_chan->chid,
1209 "skipping processing event because ch @ %s state\n",
1210 TO_GPI_PM_STR(gpii_chan->pm_state));
1211 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1212 __LINE__);
1213 return;
1214 }
1215
1216 spin_lock_irq(&gpii_chan->vc.lock);
1217 vd = vchan_next_desc(&gpii_chan->vc);
1218 if (!vd) {
1219 struct gpi_ere *gpi_ere;
1220 struct msm_gpi_tre *gpi_tre;
1221
1222 spin_unlock_irq(&gpii_chan->vc.lock);
1223 GPII_ERR(gpii, gpii_chan->chid,
1224 "event without a pending descriptor!\n");
1225 gpi_ere = (struct gpi_ere *)imed_event;
1226 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1227 gpi_ere->dword[0], gpi_ere->dword[1],
1228 gpi_ere->dword[2], gpi_ere->dword[3]);
1229 gpi_tre = tre;
1230 GPII_ERR(gpii, gpii_chan->chid,
1231 "Pending TRE: %08x %08x %08x %08x\n",
1232 gpi_tre->dword[0], gpi_tre->dword[1],
1233 gpi_tre->dword[2], gpi_tre->dword[3]);
1234 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1235 __LINE__);
1236 return;
1237 }
1238 gpi_desc = to_gpi_desc(vd);
1239
 1240 /* the TRE pointed to by the event doesn't match the descriptor's TRE */
1241 if (gpi_desc->wp != tre) {
1242 spin_unlock_irq(&gpii_chan->vc.lock);
1243 GPII_ERR(gpii, gpii_chan->chid,
1244 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1245 to_physical(ch_ring, gpi_desc->wp),
1246 to_physical(ch_ring, tre));
1247 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1248 __LINE__);
1249 return;
1250 }
1251
1252 list_del(&vd->node);
1253 spin_unlock_irq(&gpii_chan->vc.lock);
1254
1255 sg_tre = gpi_desc->sg_tre;
1256 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1257
1258 /*
 1259 * The event RP points to the last TRE processed,
 1260 * so advance the ring rp to tre + 1.
1261 */
1262 tre += ch_ring->el_size;
1263 if (tre >= (ch_ring->base + ch_ring->len))
1264 tre = ch_ring->base;
1265 ch_ring->rp = tre;
1266 sg_tre += sg_ring->el_size;
1267 if (sg_tre >= (sg_ring->base + sg_ring->len))
1268 sg_tre = sg_ring->base;
1269 sg_ring->rp = sg_tre;
1270
1271 /* make sure rp updates are immediately visible to all cores */
1272 smp_wmb();
1273
1274 /* update Immediate data from Event back in to TRE if it's RX channel */
1275 if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
1276 client_tre->dword[0] =
1277 ((struct msm_gpi_tre *)imed_event)->dword[0];
1278 client_tre->dword[1] =
1279 ((struct msm_gpi_tre *)imed_event)->dword[1];
1280 client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
1281 imed_event->length);
1282 }
1283
1284 tx_cb_param = vd->tx.callback_param;
1285 if (tx_cb_param) {
1286 GPII_VERB(gpii, gpii_chan->chid,
1287 "cb_length:%u compl_code:0x%x status:0x%x\n",
1288 imed_event->length, imed_event->code,
1289 imed_event->status);
1290 tx_cb_param->length = imed_event->length;
1291 tx_cb_param->completion_code = imed_event->code;
1292 tx_cb_param->status = imed_event->status;
1293 }
1294
1295 spin_lock_irq(&gpii_chan->vc.lock);
1296 vchan_cookie_complete(vd);
1297 spin_unlock_irq(&gpii_chan->vc.lock);
1298}
1299
1300/* processing transfer completion events */
1301static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
1302 struct xfer_compl_event *compl_event)
1303{
1304 struct gpii *gpii = gpii_chan->gpii;
1305 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1306 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1307 void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
1308 struct msm_gpi_tre *client_tre;
1309 struct virt_dma_desc *vd;
1310 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1311 struct gpi_desc *gpi_desc;
1312 void *sg_tre = NULL;
1313
1314 /* only process events on active channel */
1315 if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
1316 GPII_ERR(gpii, gpii_chan->chid,
1317 "skipping processing event because ch @ %s state\n",
1318 TO_GPI_PM_STR(gpii_chan->pm_state));
1319 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1320 __LINE__);
1321 return;
1322 }
1323
1324 spin_lock_irq(&gpii_chan->vc.lock);
1325 vd = vchan_next_desc(&gpii_chan->vc);
1326 if (!vd) {
1327 struct gpi_ere *gpi_ere;
1328
1329 spin_unlock_irq(&gpii_chan->vc.lock);
1330 GPII_ERR(gpii, gpii_chan->chid,
1331 "Event without a pending descriptor!\n");
1332 gpi_ere = (struct gpi_ere *)compl_event;
1333 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1334 gpi_ere->dword[0], gpi_ere->dword[1],
1335 gpi_ere->dword[2], gpi_ere->dword[3]);
1336 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1337 __LINE__);
1338 return;
1339 }
1340
1341 gpi_desc = to_gpi_desc(vd);
1342
1343 /* TRE Event generated didn't match descriptor's TRE */
1344 if (gpi_desc->wp != ev_rp) {
1345 spin_unlock_irq(&gpii_chan->vc.lock);
1346 GPII_ERR(gpii, gpii_chan->chid,
 1347 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1348 to_physical(ch_ring, gpi_desc->wp),
1349 to_physical(ch_ring, ev_rp));
1350 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1351 __LINE__);
1352 return;
1353 }
1354
1355 list_del(&vd->node);
1356 spin_unlock_irq(&gpii_chan->vc.lock);
1357
1358 sg_tre = gpi_desc->sg_tre;
1359 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1360
1361 /*
 1362 * The event RP points to the last TRE processed,
 1363 * so advance the ring rp to ev_rp + 1.
1364 */
1365 ev_rp += ch_ring->el_size;
1366 if (ev_rp >= (ch_ring->base + ch_ring->len))
1367 ev_rp = ch_ring->base;
1368 ch_ring->rp = ev_rp;
1369 sg_tre += sg_ring->el_size;
1370 if (sg_tre >= (sg_ring->base + sg_ring->len))
1371 sg_tre = sg_ring->base;
1372 sg_ring->rp = sg_tre;
1373
1374 /* update must be visible to other cores */
1375 smp_wmb();
1376
1377 tx_cb_param = vd->tx.callback_param;
1378 if (tx_cb_param) {
1379 GPII_VERB(gpii, gpii_chan->chid,
1380 "cb_length:%u compl_code:0x%x status:0x%x\n",
1381 compl_event->length, compl_event->code,
1382 compl_event->status);
1383 tx_cb_param->length = compl_event->length;
1384 tx_cb_param->completion_code = compl_event->code;
1385 tx_cb_param->status = compl_event->status;
1386 }
1387
1388 spin_lock_irq(&gpii_chan->vc.lock);
1389 vchan_cookie_complete(vd);
1390 spin_unlock_irq(&gpii_chan->vc.lock);
1391}
1392
1393/* process all events */
1394static void gpi_process_events(struct gpii *gpii)
1395{
1396 struct gpi_ring *ev_ring = &gpii->ev_ring;
1397 u32 cntxt_rp, local_rp;
1398 union gpi_event *gpi_event;
1399 struct gpii_chan *gpii_chan;
1400 u32 chid, type;
1401 u32 ieob_irq;
1402
1403 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1404 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1405
 1406 GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp: 0x%08x local_rp:0x%08x\n",
1407 cntxt_rp, local_rp);
1408
1409 do {
1410 while (local_rp != cntxt_rp) {
1411 gpi_event = ev_ring->rp;
1412 chid = gpi_event->xfer_compl_event.chid;
1413 type = gpi_event->xfer_compl_event.type;
1414 GPII_VERB(gpii, GPI_DBG_COMMON,
1415 "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
1416 local_rp, chid, type,
1417 gpi_event->gpi_ere.dword[0],
1418 gpi_event->gpi_ere.dword[1],
1419 gpi_event->gpi_ere.dword[2],
1420 gpi_event->gpi_ere.dword[3]);
1421
1422 switch (type) {
1423 case XFER_COMPLETE_EV_TYPE:
1424 gpii_chan = &gpii->gpii_chan[chid];
1425 gpi_process_xfer_compl_event(gpii_chan,
1426 &gpi_event->xfer_compl_event);
1427 break;
1428 case STALE_EV_TYPE:
1429 GPII_VERB(gpii, GPI_DBG_COMMON,
1430 "stale event, not processing\n");
1431 break;
1432 case IMMEDIATE_DATA_EV_TYPE:
1433 gpii_chan = &gpii->gpii_chan[chid];
1434 gpi_process_imed_data_event(gpii_chan,
1435 &gpi_event->immediate_data_event);
1436 break;
1437 case QUP_NOTIF_EV_TYPE:
1438 gpii_chan = &gpii->gpii_chan[chid];
1439 gpi_process_qup_notif_event(gpii_chan,
1440 &gpi_event->qup_notif_event);
1441 break;
1442 default:
1443 GPII_VERB(gpii, GPI_DBG_COMMON,
1444 "not supported event type:0x%x\n",
1445 type);
1446 }
1447 gpi_ring_recycle_ev_element(ev_ring);
1448 local_rp = (u32)to_physical(ev_ring,
1449 (void *)ev_ring->rp);
1450 }
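		/* ring the event ring doorbell so the recycled elements are returned to hardware */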
1451 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1452
1453 /* clear pending IEOB events */
1454 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
1455 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
1456
1457 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1458 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1459
1460 } while (cntxt_rp != local_rp);
1461
1462 GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
1463 local_rp);
1464}
1465
1466/* processing events using tasklet */
1467static void gpi_ev_tasklet(unsigned long data)
1468{
1469 struct gpii *gpii = (struct gpii *)data;
1470
1471 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1472
1473 read_lock_bh(&gpii->pm_lock);
1474 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1475 read_unlock_bh(&gpii->pm_lock);
1476 GPII_ERR(gpii, GPI_DBG_COMMON,
1477 "not processing any events, pm_state:%s\n",
1478 TO_GPI_PM_STR(gpii->pm_state));
1479 return;
1480 }
1481
1482 /* process the events */
1483 gpi_process_events(gpii);
1484
1485 /* enable IEOB, switching back to interrupts */
1486 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1487 read_unlock_bh(&gpii->pm_lock);
1488
1489 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1490}
1491
1492/* marks all pending events for the channel as stale */
1493void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
1494{
1495 struct gpii *gpii = gpii_chan->gpii;
1496 struct gpi_ring *ev_ring = &gpii->ev_ring;
1497 void *ev_rp;
1498 u32 cntxt_rp, local_rp;
1499
1500 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1501 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1502
1503 ev_rp = ev_ring->rp;
1504 local_rp = (u32)to_physical(ev_ring, ev_rp);
1505 while (local_rp != cntxt_rp) {
1506 union gpi_event *gpi_event = ev_rp;
1507 u32 chid = gpi_event->xfer_compl_event.chid;
1508
1509 if (chid == gpii_chan->chid)
1510 gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1511 ev_rp += ev_ring->el_size;
1512 if (ev_rp >= (ev_ring->base + ev_ring->len))
1513 ev_rp = ev_ring->base;
1514 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1515 local_rp = (u32)to_physical(ev_ring, ev_rp);
1516 }
1517}
1518
1519/* reset sw state and issue channel reset or de-alloc */
1520static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
1521{
1522 struct gpii *gpii = gpii_chan->gpii;
1523 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1524 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1525 unsigned long flags;
1526 LIST_HEAD(list);
1527 int ret;
1528
1529 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1530 ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
1531 if (ret) {
1532 GPII_ERR(gpii, gpii_chan->chid,
1533 "Error with cmd:%s ret:%d\n",
1534 TO_GPI_CMD_STR(gpi_cmd), ret);
1535 return ret;
1536 }
1537
1538 /* initialize the local ring ptrs */
1539 ch_ring->rp = ch_ring->base;
1540 ch_ring->wp = ch_ring->base;
1541 sg_ring->rp = sg_ring->base;
1542 sg_ring->wp = sg_ring->base;
1543
1544 /* visible to other cores */
1545 smp_wmb();
1546
1547 /* check event ring for any stale events */
1548 write_lock_irq(&gpii->pm_lock);
1549 gpi_mark_stale_events(gpii_chan);
1550
1551 /* remove all async descriptors */
1552 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1553 vchan_get_all_descriptors(&gpii_chan->vc, &list);
1554 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1555 write_unlock_irq(&gpii->pm_lock);
1556 vchan_dma_desc_free_list(&gpii_chan->vc, &list);
1557
1558 return 0;
1559}
1560
1561static int gpi_start_chan(struct gpii_chan *gpii_chan)
1562{
1563 struct gpii *gpii = gpii_chan->gpii;
1564 int ret;
1565
1566 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1567
1568 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
1569 if (ret) {
1570 GPII_ERR(gpii, gpii_chan->chid,
1571 "Error with cmd:%s ret:%d\n",
1572 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1573 return ret;
1574 }
1575
1576 /* gpii CH is active now */
1577 write_lock_irq(&gpii->pm_lock);
1578 gpii_chan->pm_state = ACTIVE_STATE;
1579 write_unlock_irq(&gpii->pm_lock);
1580
1581 return 0;
1582}
1583
1584/* allocate and configure the transfer channel */
1585static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
1586{
1587 struct gpii *gpii = gpii_chan->gpii;
1588 struct gpi_ring *ring = &gpii_chan->ch_ring;
1589 int i;
1590 int ret;
1591 struct {
1592 void *base;
1593 int offset;
1594 u32 val;
1595 } ch_reg[] = {
1596 {
1597 gpii_chan->ch_cntxt_base_reg,
1598 CNTXT_0_CONFIG,
1599 GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
1600 gpii_chan->dir,
1601 GPI_CHTYPE_PROTO_GPI),
1602 },
1603 {
1604 gpii_chan->ch_cntxt_base_reg,
1605 CNTXT_1_R_LENGTH,
1606 ring->len,
1607 },
1608 {
1609 gpii_chan->ch_cntxt_base_reg,
1610 CNTXT_2_RING_BASE_LSB,
1611 (u32)ring->phys_addr,
1612 },
1613 {
1614 gpii_chan->ch_cntxt_base_reg,
1615 CNTXT_3_RING_BASE_MSB,
1616 (u32)(ring->phys_addr >> 32),
1617 },
1618 { /* program MSB of DB register with ring base */
1619 gpii_chan->ch_cntxt_db_reg,
1620 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1621 (u32)(ring->phys_addr >> 32),
1622 },
1623 {
1624 gpii->regs,
1625 GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
1626 gpii_chan->chid),
1627 GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
1628 gpii_chan->protocol,
1629 gpii_chan->seid),
1630 },
1631 {
1632 gpii->regs,
1633 GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
1634 gpii_chan->chid),
1635 0,
1636 },
1637 {
1638 gpii->regs,
1639 GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
1640 gpii_chan->chid),
1641 0,
1642 },
1643 {
1644 gpii->regs,
1645 GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
1646 gpii_chan->chid),
1647 0,
1648 },
1649 {
1650 gpii->regs,
1651 GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
1652 gpii_chan->chid),
1653 1,
1654 },
1655 { NULL },
1656 };
1657
1658 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1659
1660 if (send_alloc_cmd) {
1661 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
1662 if (ret) {
1663 GPII_ERR(gpii, gpii_chan->chid,
1664 "Error with cmd:%s ret:%d\n",
1665 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1666 return ret;
1667 }
1668 }
1669
1670 /* program channel cntxt registers */
1671 for (i = 0; ch_reg[i].base; i++)
1672 gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
1673 ch_reg[i].val);
1674 /* flush all the writes */
1675 wmb();
1676 return 0;
1677}
1678
1679/* allocate and configure event ring */
1680static int gpi_alloc_ev_chan(struct gpii *gpii)
1681{
1682 struct gpi_ring *ring = &gpii->ev_ring;
1683 int i;
1684 int ret;
1685 struct {
1686 void *base;
1687 int offset;
1688 u32 val;
1689 } ev_reg[] = {
1690 {
1691 gpii->ev_cntxt_base_reg,
1692 CNTXT_0_CONFIG,
1693 GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
1694 GPI_INTTYPE_IRQ,
1695 GPI_CHTYPE_GPI_EV),
1696 },
1697 {
1698 gpii->ev_cntxt_base_reg,
1699 CNTXT_1_R_LENGTH,
1700 ring->len,
1701 },
1702 {
1703 gpii->ev_cntxt_base_reg,
1704 CNTXT_2_RING_BASE_LSB,
1705 (u32)ring->phys_addr,
1706 },
1707 {
1708 gpii->ev_cntxt_base_reg,
1709 CNTXT_3_RING_BASE_MSB,
1710 (u32)(ring->phys_addr >> 32),
1711 },
1712 {
1713 /* program db msg with ring base msb */
1714 gpii->ev_cntxt_db_reg,
1715 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1716 (u32)(ring->phys_addr >> 32),
1717 },
1718 {
1719 gpii->ev_cntxt_base_reg,
1720 CNTXT_8_RING_INT_MOD,
1721 0,
1722 },
1723 {
1724 gpii->ev_cntxt_base_reg,
1725 CNTXT_10_RING_MSI_LSB,
1726 0,
1727 },
1728 {
1729 gpii->ev_cntxt_base_reg,
1730 CNTXT_11_RING_MSI_MSB,
1731 0,
1732 },
1733 {
1734 gpii->ev_cntxt_base_reg,
1735 CNTXT_8_RING_INT_MOD,
1736 0,
1737 },
1738 {
1739 gpii->ev_cntxt_base_reg,
1740 CNTXT_12_RING_RP_UPDATE_LSB,
1741 0,
1742 },
1743 {
1744 gpii->ev_cntxt_base_reg,
1745 CNTXT_13_RING_RP_UPDATE_MSB,
1746 0,
1747 },
1748 { NULL },
1749 };
1750
1751 GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
1752
1753 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1754 if (ret) {
1755 GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
1756 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1757 return ret;
1758 }
1759
1760 /* program event context */
1761 for (i = 0; ev_reg[i].base; i++)
1762 gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
1763 ev_reg[i].val);
1764
1765 /* add events to ring */
1766 ring->wp = (ring->base + ring->len - ring->el_size);
1767
1768 /* flush all the writes */
1769 wmb();
1770
1771 /* gpii is active now */
1772 write_lock_irq(&gpii->pm_lock);
1773 gpii->pm_state = ACTIVE_STATE;
1774 write_unlock_irq(&gpii->pm_lock);
1775 gpi_write_ev_db(gpii, ring, ring->wp);
1776
1777 return 0;
1778}
1779
1780/* calculate # of ERE/TRE available to queue */
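/*
 * One element is always left unused so a full ring can be told apart from
 * an empty one: e.g. a 4-element ring with rp == wp reports 3 available.
 */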
1781static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1782{
1783 int elements = 0;
1784
1785 if (ring->wp < ring->rp)
1786 elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1787 else {
1788 elements = (ring->rp - ring->base) / ring->el_size;
1789 elements += ((ring->base + ring->len - ring->wp) /
1790 ring->el_size) - 1;
1791 }
1792
1793 return elements;
1794}
1795
1796static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1797{
1798
1799 if (gpi_ring_num_elements_avail(ring) <= 0)
1800 return -ENOMEM;
1801
1802 *wp = ring->wp;
1803 ring->wp += ring->el_size;
1804 if (ring->wp >= (ring->base + ring->len))
1805 ring->wp = ring->base;
1806
1807 /* visible to other cores */
1808 smp_wmb();
1809
1810 return 0;
1811}
1812
1813static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1814{
1815 /* Update the WP */
1816 ring->wp += ring->el_size;
1817 if (ring->wp >= (ring->base + ring->len))
1818 ring->wp = ring->base;
1819
1820 /* Update the RP */
1821 ring->rp += ring->el_size;
1822 if (ring->rp >= (ring->base + ring->len))
1823 ring->rp = ring->base;
1824
1825 /* visible to other cores */
1826 smp_wmb();
1827}
1828
1829static void gpi_free_ring(struct gpi_ring *ring,
1830 struct gpii *gpii)
1831{
1832 if (ring->dma_handle)
1833 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1834 ring->pre_aligned, ring->dma_handle);
1835 else
1836 vfree(ring->pre_aligned);
1837 memset(ring, 0, sizeof(*ring));
1838}
1839
1840/* allocate memory for transfer and event rings */
1841static int gpi_alloc_ring(struct gpi_ring *ring,
1842 u32 elements,
1843 u32 el_size,
1844 struct gpii *gpii,
1845 bool alloc_coherent)
1846{
1847 u64 len = elements * el_size;
1848 int bit;
1849
1850 if (alloc_coherent) {
1851 /* ring len must be power of 2 */
1852 bit = find_last_bit((unsigned long *)&len, 32);
1853 if (((1 << bit) - 1) & len)
1854 bit++;
1855 len = 1 << bit;
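		/*
		 * Over-allocate 2 * len - 1 bytes so a len-aligned block of
		 * len bytes is guaranteed to fit inside the allocation.
		 */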
1856 ring->alloc_size = (len + (len - 1));
1857 GPII_INFO(gpii, GPI_DBG_COMMON,
1858 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
1859 elements, el_size, (elements * el_size), len,
1860 ring->alloc_size);
1861 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1862 ring->alloc_size,
1863 &ring->dma_handle,
1864 GFP_KERNEL);
1865 if (!ring->pre_aligned) {
1866 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1867 "could not alloc size:%lu mem for ring\n",
1868 ring->alloc_size);
1869 return -ENOMEM;
1870 }
1871
1872 /* align the physical mem */
1873 ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1874 ring->base = ring->pre_aligned +
1875 (ring->phys_addr - ring->dma_handle);
1876 } else {
1877 ring->pre_aligned = vmalloc(len);
1878 if (!ring->pre_aligned) {
1879 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1880 "could not allocsize:%llu mem for ring\n",
1881 len);
1882 return -ENOMEM;
1883 }
1884 ring->phys_addr = 0;
1885 ring->dma_handle = 0;
1886 ring->base = ring->pre_aligned;
1887 }
1888
1889 ring->rp = ring->base;
1890 ring->wp = ring->base;
1891 ring->len = len;
1892 ring->el_size = el_size;
1893 ring->elements = ring->len / ring->el_size;
1894 memset(ring->base, 0, ring->len);
1895 ring->configured = true;
1896
1897 /* update to other cores */
1898 smp_wmb();
1899
1900 GPII_INFO(gpii, GPI_DBG_COMMON,
1901 "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
1902 ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
1903 ring->elements);
1904
1905 return 0;
1906}
1907
1908/* copy tre into transfer ring */
1909static void gpi_queue_xfer(struct gpii *gpii,
1910 struct gpii_chan *gpii_chan,
1911 struct msm_gpi_tre *gpi_tre,
1912 void **wp,
1913 struct sg_tre **sg_tre)
1914{
1915 struct msm_gpi_tre *ch_tre;
1916 int ret;
1917
1918 /* get next tre location we can copy */
1919 ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
1920 if (unlikely(ret)) {
1921 GPII_CRITIC(gpii, gpii_chan->chid,
1922 "Error adding ring element to xfer ring\n");
1923 return;
1924 }
1925 /* get next sg tre location we can use */
1926 ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
1927 if (unlikely(ret)) {
1928 GPII_CRITIC(gpii, gpii_chan->chid,
1929 "Error adding ring element to sg ring\n");
1930 return;
1931 }
1932
1933 /* copy the tre info */
1934 memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
1935 (*sg_tre)->ptr = gpi_tre;
1936 (*sg_tre)->wp = ch_tre;
1937 *wp = ch_tre;
1938}
1939
1940/* reset and restart transfer channel */
1941int gpi_terminate_all(struct dma_chan *chan)
1942{
1943 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1944 struct gpii *gpii = gpii_chan->gpii;
1945 int schid, echid, i;
1946 int ret = 0;
1947
1948 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1949 mutex_lock(&gpii->ctrl_lock);
1950
1951 /*
 1952 * unless the protocol is UART, treat both channels as a group:
 1953 * STOP, RESET, and START must be issued in lockstep
1954 */
1955 schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
1956 echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
1957 MAX_CHANNELS_PER_GPII;
1958
1959 /* stop the channel */
1960 for (i = schid; i < echid; i++) {
1961 gpii_chan = &gpii->gpii_chan[i];
1962
1963 /* disable ch state so no more TRE processing */
1964 write_lock_irq(&gpii->pm_lock);
1965 gpii_chan->pm_state = PREPARE_TERMINATE;
1966 write_unlock_irq(&gpii->pm_lock);
1967
1968 /* send command to Stop the channel */
1969 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
1970 if (ret)
1971 GPII_ERR(gpii, gpii_chan->chid,
1972 "Error Stopping Channel:%d resetting anyway\n",
1973 ret);
1974 }
1975
1976 /* reset the channels (clears any pending tre) */
1977 for (i = schid; i < echid; i++) {
1978 gpii_chan = &gpii->gpii_chan[i];
1979
1980 ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
1981 if (ret) {
1982 GPII_ERR(gpii, gpii_chan->chid,
1983 "Error resetting channel ret:%d\n", ret);
1984 goto terminate_exit;
1985 }
1986
1987 /* reprogram channel CNTXT */
1988 ret = gpi_alloc_chan(gpii_chan, false);
1989 if (ret) {
1990 GPII_ERR(gpii, gpii_chan->chid,
1991 "Error alloc_channel ret:%d\n", ret);
1992 goto terminate_exit;
1993 }
1994 }
1995
1996 /* restart the channels */
1997 for (i = schid; i < echid; i++) {
1998 gpii_chan = &gpii->gpii_chan[i];
1999
2000 ret = gpi_start_chan(gpii_chan);
2001 if (ret) {
2002 GPII_ERR(gpii, gpii_chan->chid,
2003 "Error Starting Channel ret:%d\n", ret);
2004 goto terminate_exit;
2005 }
2006 }
2007
2008terminate_exit:
2009 mutex_unlock(&gpii->ctrl_lock);
2010 return ret;
2011}
2012
2013/* pause dma transfer for all channels */
2014static int gpi_pause(struct dma_chan *chan)
2015{
2016 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2017 struct gpii *gpii = gpii_chan->gpii;
2018 int i, ret;
2019
2020 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
2021 mutex_lock(&gpii->ctrl_lock);
2022
2023 /*
2024	 * pause/resume is per gpii, not per channel, so the
2025	 * client needs to call pause only once
2026 */
2027 if (gpii->pm_state == PAUSE_STATE) {
2028 GPII_INFO(gpii, gpii_chan->chid,
2029 "channel is already paused\n");
2030 mutex_unlock(&gpii->ctrl_lock);
2031 return 0;
2032 }
2033
2034 /* send stop command to stop the channels */
2035 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2036 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2037 if (ret) {
2038 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2039 "Error stopping chan, ret:%d\n", ret);
2040 mutex_unlock(&gpii->ctrl_lock);
2041 return ret;
2042 }
2043 }
2044
2045 disable_irq(gpii->irq);
2046
2047	/* wait for the event tasklet to complete */
2048 tasklet_kill(&gpii->ev_task);
2049
2050 write_lock_irq(&gpii->pm_lock);
2051 gpii->pm_state = PAUSE_STATE;
2052 write_unlock_irq(&gpii->pm_lock);
2053 mutex_unlock(&gpii->ctrl_lock);
2054
2055 return 0;
2056}
2057
2058/* resume dma transfer */
2059static int gpi_resume(struct dma_chan *chan)
2060{
2061 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2062 struct gpii *gpii = gpii_chan->gpii;
2063 int i;
2064 int ret;
2065
2066 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2067
2068 mutex_lock(&gpii->ctrl_lock);
2069 if (gpii->pm_state == ACTIVE_STATE) {
2070 GPII_INFO(gpii, gpii_chan->chid,
2071 "channel is already active\n");
2072 mutex_unlock(&gpii->ctrl_lock);
2073 return 0;
2074 }
2075
2076 enable_irq(gpii->irq);
2077
2078 /* send start command to start the channels */
2079 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2080 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
2081 if (ret) {
2082 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2083				 "Error starting chan, ret:%d\n", ret);
2084 mutex_unlock(&gpii->ctrl_lock);
2085 return ret;
2086 }
2087 }
2088
2089 write_lock_irq(&gpii->pm_lock);
2090 gpii->pm_state = ACTIVE_STATE;
2091 write_unlock_irq(&gpii->pm_lock);
2092 mutex_unlock(&gpii->ctrl_lock);
2093
2094 return 0;
2095}
2096
2097void gpi_desc_free(struct virt_dma_desc *vd)
2098{
2099 struct gpi_desc *gpi_desc = to_gpi_desc(vd);
2100
2101 kfree(gpi_desc);
2102}
2103
2104/* copy client TREs into the transfer ring and prepare a descriptor */
2105struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
2106 struct scatterlist *sgl,
2107 unsigned int sg_len,
2108 enum dma_transfer_direction direction,
2109 unsigned long flags,
2110 void *context)
2111{
2112 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2113 struct gpii *gpii = gpii_chan->gpii;
2114 u32 nr, sg_nr;
2115 u32 nr_req = 0;
2116 int i, j;
2117 struct scatterlist *sg;
2118 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
2119 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
2120 void *tre, *wp = NULL;
2121 struct sg_tre *sg_tre = NULL;
2122 const gfp_t gfp = GFP_ATOMIC;
2123 struct gpi_desc *gpi_desc;
2124
2125 GPII_VERB(gpii, gpii_chan->chid, "enter\n");
2126
2127 if (!is_slave_direction(direction)) {
2128 GPII_ERR(gpii, gpii_chan->chid,
2129 "invalid dma direction: %d\n", direction);
2130 return NULL;
2131 }
2132
2133 /* calculate # of elements required & available */
2134 nr = gpi_ring_num_elements_avail(ch_ring);
2135 sg_nr = gpi_ring_num_elements_avail(sg_ring);
2136 for_each_sg(sgl, sg, sg_len, i) {
2137 GPII_VERB(gpii, gpii_chan->chid,
2138 "%d of %u len:%u\n", i, sg_len, sg->length);
2139 nr_req += (sg->length / ch_ring->el_size);
2140 }
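	/*
	 * Each scatterlist entry is expected to carry TREs packed back to
	 * back, so sg->length / el_size gives the number of ring slots that
	 * entry will consume.
	 */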
2141 GPII_VERB(gpii, gpii_chan->chid,
2142 "nr_elements_avail:%u sg_avail:%u required:%u\n",
2143 nr, sg_nr, nr_req);
2144
2145 if (nr < nr_req || sg_nr < nr_req) {
2146 GPII_ERR(gpii, gpii_chan->chid,
2147 "not enough space in ring, avail:%u,%u required:%u\n",
2148 nr, sg_nr, nr_req);
2149 return NULL;
2150 }
2151
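	/*
	 * prep calls may be made from atomic context by dmaengine clients,
	 * hence the GFP_ATOMIC allocation of the descriptor.
	 */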
2152 gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
2153 if (!gpi_desc) {
2154 GPII_ERR(gpii, gpii_chan->chid,
2155 "out of memory for descriptor\n");
2156 return NULL;
2157 }
2158
2159 /* copy each tre into transfer ring */
2160 for_each_sg(sgl, sg, sg_len, i)
2161 for (j = 0, tre = sg_virt(sg); j < sg->length;
2162 j += ch_ring->el_size, tre += ch_ring->el_size)
2163 gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);
2164
2165 /* set up the descriptor */
2166 gpi_desc->db = ch_ring->wp;
2167 gpi_desc->wp = wp;
2168 gpi_desc->sg_tre = sg_tre;
2169 gpi_desc->gpii_chan = gpii_chan;
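	/*
	 * db is the ring write pointer after all TREs were queued; it is the
	 * value gpi_issue_pending() later writes to the channel doorbell.
	 */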
2170 GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
2171 to_physical(ch_ring, ch_ring->wp),
2172 to_physical(ch_ring, ch_ring->rp));
2173
2174 return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
2175}
2176
2177/* rings the transfer ring doorbell to begin the transfer */
2178static void gpi_issue_pending(struct dma_chan *chan)
2179{
2180 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2181 struct gpii *gpii = gpii_chan->gpii;
2182 unsigned long flags, pm_lock_flags;
2183 struct virt_dma_desc *vd = NULL;
2184 struct gpi_desc *gpi_desc;
2185
2186 GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
2187
2188 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
2189
2190	/* move all submitted descriptors to the issued list */
2191 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
2192 if (vchan_issue_pending(&gpii_chan->vc))
2193 vd = list_last_entry(&gpii_chan->vc.desc_issued,
2194 struct virt_dma_desc, node);
2195 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
2196
2197	/* nothing to do, list is empty */
2198 if (!vd) {
2199 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2200 GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
2201 return;
2202 }
2203
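	/*
	 * Ring the doorbell only for the newest issued descriptor; its write
	 * pointer covers the TREs of every earlier descriptor as well.
	 */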
2204 gpi_desc = to_gpi_desc(vd);
2205 gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
2206 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2207}
2208
2209/* configure or issue async command */
2210static int gpi_config(struct dma_chan *chan,
2211 struct dma_slave_config *config)
2212{
2213 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2214 struct gpii *gpii = gpii_chan->gpii;
2215 struct msm_gpi_ctrl *gpi_ctrl = chan->private;
2216 const int ev_factor = gpii->gpi_dev->ev_factor;
2217 u32 elements;
2218 int i = 0;
2219 int ret = 0;
2220
2221 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2222 if (!gpi_ctrl) {
2223 GPII_ERR(gpii, gpii_chan->chid,
2224			 "no config ctrl data provided\n");
2225 return -EINVAL;
2226 }
2227
2228 mutex_lock(&gpii->ctrl_lock);
2229
2230 switch (gpi_ctrl->cmd) {
2231 case MSM_GPI_INIT:
2232 GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
2233
2234 gpii_chan->client_info.callback = gpi_ctrl->init.callback;
2235 gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
2236 gpii_chan->pm_state = CONFIG_STATE;
2237
2238		/* check if both channels are configured before continuing */
2239 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2240 if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
2241 goto exit_gpi_init;
2242
2243 /* configure to highest priority from two channels */
2244 gpii->ev_priority = min(gpii->gpii_chan[0].priority,
2245 gpii->gpii_chan[1].priority);
2246
2247 /* protocol must be same for both channels */
2248 if (gpii->gpii_chan[0].protocol !=
2249 gpii->gpii_chan[1].protocol) {
2250 GPII_ERR(gpii, gpii_chan->chid,
2251				 "protocols do not match: %u != %u\n",
2252 gpii->gpii_chan[0].protocol,
2253 gpii->gpii_chan[1].protocol);
2254 ret = -EINVAL;
2255 goto exit_gpi_init;
2256 }
2257 gpii->protocol = gpii_chan->protocol;
2258
2259 /* allocate memory for event ring */
2260 elements = max(gpii->gpii_chan[0].req_tres,
2261 gpii->gpii_chan[1].req_tres);
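		/*
		 * The event ring is sized from the larger of the two
		 * channels' TRE counts, scaled by 2^ev_factor (read from the
		 * 'qcom,ev-factor' DT property at probe time).
		 */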
2262 ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
2263 sizeof(union gpi_event), gpii, true);
2264 if (ret) {
2265 GPII_ERR(gpii, gpii_chan->chid,
2266 "error allocating mem for ev ring\n");
2267 goto exit_gpi_init;
2268 }
2269
2270 /* configure interrupts */
2271 write_lock_irq(&gpii->pm_lock);
2272 gpii->pm_state = PREPARE_HARDWARE;
2273 write_unlock_irq(&gpii->pm_lock);
2274 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
2275 if (ret) {
2276 GPII_ERR(gpii, gpii_chan->chid,
2277 "error config. interrupts, ret:%d\n", ret);
2278 goto error_config_int;
2279 }
2280
2281		/* allocate the event ring channel */
2282 ret = gpi_alloc_ev_chan(gpii);
2283 if (ret) {
2284 GPII_ERR(gpii, gpii_chan->chid,
2285 "error alloc_ev_chan:%d\n", ret);
2286 goto error_alloc_ev_ring;
2287 }
2288
2289 /* Allocate all channels */
2290 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2291 ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
2292 if (ret) {
2293 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2294 "Error allocating chan:%d\n", ret);
2295 goto error_alloc_chan;
2296 }
2297 }
2298
2299 /* start channels */
2300 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2301 ret = gpi_start_chan(&gpii->gpii_chan[i]);
2302 if (ret) {
2303 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2304 "Error start chan:%d\n", ret);
2305 goto error_start_chan;
2306 }
2307 }
2308
2309 break;
2310 case MSM_GPI_CMD_UART_SW_STALE:
2311 GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
2312 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
2313 break;
2314 case MSM_GPI_CMD_UART_RFR_READY:
2315 GPII_INFO(gpii, gpii_chan->chid,
2316 "sending UART RFR READY cmd\n");
2317 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
2318 break;
2319 case MSM_GPI_CMD_UART_RFR_NOT_READY:
2320 GPII_INFO(gpii, gpii_chan->chid,
2321			  "sending UART RFR NOT READY cmd\n");
2322 ret = gpi_send_cmd(gpii, gpii_chan,
2323 GPI_CH_CMD_UART_RFR_NOT_READY);
2324 break;
2325 default:
2326 GPII_ERR(gpii, gpii_chan->chid,
2327 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
2328 ret = -EINVAL;
2329 }
2330
2331 mutex_unlock(&gpii->ctrl_lock);
2332 return ret;
2333
2334error_start_chan:
2335	for (i = i - 1; i >= 0; i--) {
2336		gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2337		gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_RESET);
2338 }
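	/* a start failure leaves both channels allocated; de-allocate them below */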
2339	i = MAX_CHANNELS_PER_GPII;
2340error_alloc_chan:
2341 for (i = i - 1; i >= 0; i--)
2342		gpi_reset_chan(&gpii->gpii_chan[i], GPI_CH_CMD_DE_ALLOC);
2343error_alloc_ev_ring:
2344 gpi_disable_interrupts(gpii);
2345error_config_int:
2346 gpi_free_ring(&gpii->ev_ring, gpii);
2347exit_gpi_init:
2348 mutex_unlock(&gpii->ctrl_lock);
2349 return ret;
2350}
2351
2352/* release all channel resources */
2353static void gpi_free_chan_resources(struct dma_chan *chan)
2354{
2355 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2356 struct gpii *gpii = gpii_chan->gpii;
2357 enum gpi_pm_state cur_state;
2358 int ret, i;
2359
2360 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2361
2362 mutex_lock(&gpii->ctrl_lock);
2363
2364 cur_state = gpii_chan->pm_state;
2365
2366 /* disable ch state so no more TRE processing for this channel */
2367 write_lock_irq(&gpii->pm_lock);
2368 gpii_chan->pm_state = PREPARE_TERMINATE;
2369 write_unlock_irq(&gpii->pm_lock);
2370
2371	/* attempt to do a graceful hardware shutdown */
2372 if (cur_state == ACTIVE_STATE) {
2373 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2374 if (ret)
2375 GPII_ERR(gpii, gpii_chan->chid,
2376 "error stopping channel:%d\n", ret);
2377
2378 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2379 if (ret)
2380 GPII_ERR(gpii, gpii_chan->chid,
2381 "error resetting channel:%d\n", ret);
2382
2383 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2384 }
2385
2386 /* free all allocated memory */
2387 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2388 gpi_free_ring(&gpii_chan->sg_ring, gpii);
2389 vchan_free_chan_resources(&gpii_chan->vc);
2390
2391 write_lock_irq(&gpii->pm_lock);
2392 gpii_chan->pm_state = DISABLE_STATE;
2393 write_unlock_irq(&gpii->pm_lock);
2394
2395	/* if any other channel ring is still configured, exit */
2396 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2397 if (gpii->gpii_chan[i].ch_ring.configured)
2398 goto exit_free;
2399
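	/*
	 * No channel ring is configured any more, so tear down the gpii-wide
	 * resources below: event tasklet, event ring and interrupts.
	 */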
2400 GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
2401
2402 /* deallocate EV Ring */
2403 cur_state = gpii->pm_state;
2404 write_lock_irq(&gpii->pm_lock);
2405 gpii->pm_state = PREPARE_TERMINATE;
2406 write_unlock_irq(&gpii->pm_lock);
2407
2408	/* wait for the event tasklet to complete */
2409 tasklet_kill(&gpii->ev_task);
2410
2411	/* send command to de-allocate the event ring */
2412 if (cur_state == ACTIVE_STATE)
2413 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2414
2415 gpi_free_ring(&gpii->ev_ring, gpii);
2416
2417 /* disable interrupts */
2418 if (cur_state == ACTIVE_STATE)
2419 gpi_disable_interrupts(gpii);
2420
2421 /* set final state to disable */
2422 write_lock_irq(&gpii->pm_lock);
2423 gpii->pm_state = DISABLE_STATE;
2424 write_unlock_irq(&gpii->pm_lock);
2425
2426exit_free:
2427 mutex_unlock(&gpii->ctrl_lock);
2428}
2429
2430/* allocate channel resources */
2431static int gpi_alloc_chan_resources(struct dma_chan *chan)
2432{
2433 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2434 struct gpii *gpii = gpii_chan->gpii;
2435 int ret;
2436
2437 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2438
2439 mutex_lock(&gpii->ctrl_lock);
2440
2441 /* allocate memory for transfer ring */
2442 ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
2443 sizeof(struct msm_gpi_tre), gpii, true);
2444 if (ret) {
2445 GPII_ERR(gpii, gpii_chan->chid,
2446 "error allocating xfer ring, ret:%d\n", ret);
2447 goto xfer_alloc_err;
2448 }
2449
2450 ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
2451 sizeof(struct sg_tre), gpii, false);
2452 if (ret) {
2453 GPII_ERR(gpii, gpii_chan->chid,
2454 "error allocating sg ring, ret:%d\n", ret);
2455 goto sg_alloc_error;
2456 }
2457 mutex_unlock(&gpii->ctrl_lock);
2458
2459 return 0;
2460
2461sg_alloc_error:
2462 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2463xfer_alloc_err:
2464 mutex_unlock(&gpii->ctrl_lock);
2465
2466 return ret;
2467}
2468
2469static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
2470{
2471 int gpii;
2472 struct gpii_chan *tx_chan, *rx_chan;
2473
2474 /* check if same seid is already configured for another chid */
2475 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2476 if (!((1 << gpii) & gpi_dev->gpii_mask))
2477 continue;
2478
2479 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2480 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2481
2482 if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
2483 return gpii;
2484 if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
2485 return gpii;
2486 }
2487
2488 /* no channels configured with same seid, return next avail gpii */
2489 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2490 if (!((1 << gpii) & gpi_dev->gpii_mask))
2491 continue;
2492
2493 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2494 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2495
2496 /* check if gpii is configured */
2497 if (tx_chan->vc.chan.client_count ||
2498 rx_chan->vc.chan.client_count)
2499 continue;
2500
2501 /* found a free gpii */
2502 return gpii;
2503 }
2504
2505 /* no gpii instance available to use */
2506 return -EIO;
2507}
2508
2509/* gpi_of_dma_xlate: open client requested channel */
2510static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2511 struct of_dma *of_dma)
2512{
2513 struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
2514	u32 seid, chid;
2515 int gpii;
2516	struct gpii_chan *gpii_chan;
2517
2518 if (args->args_count < REQ_OF_DMA_ARGS) {
2519 GPI_ERR(gpi_dev,
2520		"gpii requires a minimum of 6 args, client passed:%d args\n",
2521 args->args_count);
2522 return NULL;
2523 }
2524
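	/*
	 * dma-cells consumed by this xlate (parsed below):
	 *   args[0] = channel id within the gpii
	 *   args[1] = serial engine id (seid)
	 *   args[2] = protocol
	 *   args[3] = number of TREs requested for the ring
	 *   args[4] = channel priority
	 */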
2525	chid = args->args[0];
2526	if (chid >= MAX_CHANNELS_PER_GPII) {
2527 GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
2528 return NULL;
2529 }
2530
2531	seid = args->args[1];
2532
2533 /* find next available gpii to use */
2534 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2535 if (gpii < 0) {
2536 GPI_ERR(gpi_dev, "no available gpii instances\n");
2537 return NULL;
2538 }
2539
2540	gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
2541	if (gpii_chan->vc.chan.client_count) {
2542 GPI_ERR(gpi_dev, "gpii:%d chid:%d seid:%d already configured\n",
2543 gpii, chid, gpii_chan->seid);
2544 return NULL;
2545 }
2546
2547 /* get ring size, protocol, se_id, and priority */
2548 gpii_chan->seid = seid;
2549 gpii_chan->protocol = args->args[2];
2550 gpii_chan->req_tres = args->args[3];
2551 gpii_chan->priority = args->args[4];
2552
2553 GPI_LOG(gpi_dev,
2554 "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
2555 gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
2556 gpii_chan->protocol);
2557
2558 return dma_get_slave_channel(&gpii_chan->vc.chan);
2559}
2560
2561/* gpi_setup_debug - setup debug capabilities */
2562static void gpi_setup_debug(struct gpi_dev *gpi_dev)
2563{
2564 char node_name[GPI_LABEL_SIZE];
2565 const umode_t mode = 0600;
2566 int i;
2567
2568 snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
2569 (u64)gpi_dev->res->start);
2570
2571 gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2572 node_name, 0);
2573 gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2574 if (!IS_ERR_OR_NULL(pdentry)) {
2575 snprintf(node_name, sizeof(node_name), "%llx",
2576 (u64)gpi_dev->res->start);
2577 gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
2578 if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
2579 debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
2580 &gpi_dev->ipc_log_lvl);
2581 debugfs_create_u32("klog_lvl", mode,
2582 gpi_dev->dentry, &gpi_dev->klog_lvl);
2583 }
2584 }
2585
2586 for (i = 0; i < gpi_dev->max_gpii; i++) {
2587 struct gpii *gpii;
2588
2589 if (!((1 << i) & gpi_dev->gpii_mask))
2590 continue;
2591
2592 gpii = &gpi_dev->gpiis[i];
2593 snprintf(gpii->label, sizeof(gpii->label),
2594 "%s%llx_gpii%d",
2595 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
2596 gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2597 gpii->label, 0);
2598 gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2599 gpii->klog_lvl = DEFAULT_KLOG_LVL;
2600
2601 if (IS_ERR_OR_NULL(gpi_dev->dentry))
2602 continue;
2603
2604 snprintf(node_name, sizeof(node_name), "gpii%d", i);
2605 gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
2606 if (IS_ERR_OR_NULL(gpii->dentry))
2607 continue;
2608
2609 debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
2610 &gpii->ipc_log_lvl);
2611 debugfs_create_u32("klog_lvl", mode, gpii->dentry,
2612 &gpii->klog_lvl);
2613 }
2614}
2615
2616static int gpi_smmu_init(struct gpi_dev *gpi_dev)
2617{
2618	u64 size = PAGE_SIZE;
2619	dma_addr_t base = 0x0;
2620 struct dma_iommu_mapping *map;
2621 int attr, ret;
2622
2623 map = arm_iommu_create_mapping(&platform_bus_type, base, size);
2624 if (IS_ERR_OR_NULL(map)) {
2625 ret = PTR_ERR(map) ? : -EIO;
2626 GPI_ERR(gpi_dev, "error create_mapping, ret:%d\n", ret);
2627 return ret;
2628 }
2629
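	/*
	 * Request that the IOMMU domain support atomic-context map/unmap
	 * (DOMAIN_ATTR_ATOMIC) and bypass stage-1 translation
	 * (DOMAIN_ATTR_S1_BYPASS) before attaching the device.
	 */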
2630 attr = 1;
2631 ret = iommu_domain_set_attr(map->domain, DOMAIN_ATTR_ATOMIC, &attr);
2632 if (ret) {
2633		GPI_ERR(gpi_dev, "error setting ATTR_ATOMIC, ret:%d\n", ret);
2634 goto error_smmu;
2635 }
2636
2637 attr = 1;
2638 ret = iommu_domain_set_attr(map->domain, DOMAIN_ATTR_S1_BYPASS, &attr);
2639 if (ret) {
2640 GPI_ERR(gpi_dev, "error setting S1_BYPASS, ret:%d\n", ret);
2641 goto error_smmu;
2642 }
2643
2644 ret = arm_iommu_attach_device(gpi_dev->dev, map);
2645 if (ret) {
2646 GPI_ERR(gpi_dev, "error iommu_attach, ret:%d\n", ret);
2647 goto error_smmu;
2648 }
2649
2650 ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
2651 if (ret) {
2652 GPI_ERR(gpi_dev, "error setting dma_mask, ret:%d\n", ret);
2653 goto error_set_mask;
2654 }
2655
2656 return ret;
2657
2658error_set_mask:
2659 arm_iommu_detach_device(gpi_dev->dev);
2660error_smmu:
2661 arm_iommu_release_mapping(map);
2662 return ret;
2663}
2664
2665static int gpi_probe(struct platform_device *pdev)
2666{
2667 struct gpi_dev *gpi_dev;
2668 int ret, i;
2669
2670 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2671 if (!gpi_dev)
2672 return -ENOMEM;
2673
2674 gpi_dev->dev = &pdev->dev;
2675 gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
2676 gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2677 "gpi-top");
2678 if (!gpi_dev->res) {
2679 GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
2680 return -EINVAL;
2681 }
2682 gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
2683 resource_size(gpi_dev->res));
2684 if (!gpi_dev->regs) {
2685 GPI_ERR(gpi_dev, "IO remap failed\n");
2686 return -EFAULT;
2687 }
2688
2689 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
2690 &gpi_dev->max_gpii);
2691 if (ret) {
2692		GPI_ERR(gpi_dev, "missing 'qcom,max-num-gpii' DT node\n");
2693 return ret;
2694 }
2695
2696 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
2697 &gpi_dev->gpii_mask);
2698 if (ret) {
2699		GPI_ERR(gpi_dev, "missing 'qcom,gpii-mask' DT node\n");
2700 return ret;
2701 }
2702
2703 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
2704 &gpi_dev->ev_factor);
2705 if (ret) {
2706 GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
2707 return ret;
2708 }
2709
2710 ret = gpi_smmu_init(gpi_dev);
2711 if (ret) {
2712 GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
2713 return ret;
2714 }
2715
2716 gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
2717 sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
2718 GFP_KERNEL);
2719 if (!gpi_dev->gpiis)
2720 return -ENOMEM;
2721
2722
2723 /* setup all the supported gpii */
2724 INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2725 for (i = 0; i < gpi_dev->max_gpii; i++) {
2726 struct gpii *gpii = &gpi_dev->gpiis[i];
2727 int chan;
2728
2729 if (!((1 << i) & gpi_dev->gpii_mask))
2730 continue;
2731
2732 /* set up ev cntxt register map */
2733 gpii->ev_cntxt_base_reg = gpi_dev->regs +
2734 GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2735 gpii->ev_cntxt_db_reg = gpi_dev->regs +
2736 GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2737 gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
2738 CNTXT_2_RING_BASE_LSB;
2739 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
2740 CNTXT_4_RING_RP_LSB;
2741 gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
2742 CNTXT_6_RING_WP_LSB;
2743 gpii->ev_cmd_reg = gpi_dev->regs +
2744 GPI_GPII_n_EV_CH_CMD_OFFS(i);
2745 gpii->ieob_src_reg = gpi_dev->regs +
2746 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
2747 gpii->ieob_clr_reg = gpi_dev->regs +
2748 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2749
2750 /* set up irq */
2751 ret = platform_get_irq(pdev, i);
2752 if (ret < 0) {
2753			GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d\n",
2754 i, ret);
2755 return ret;
2756 }
2757 gpii->irq = ret;
2758
2759 /* set up channel specific register info */
2760 for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2761 struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
2762
2763 /* set up ch cntxt register map */
2764 gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
2765 GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2766 gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
2767 GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2768 gpii_chan->ch_ring_base_lsb_reg =
2769 gpii_chan->ch_cntxt_base_reg +
2770 CNTXT_2_RING_BASE_LSB;
2771 gpii_chan->ch_ring_rp_lsb_reg =
2772 gpii_chan->ch_cntxt_base_reg +
2773 CNTXT_4_RING_RP_LSB;
2774 gpii_chan->ch_ring_wp_lsb_reg =
2775 gpii_chan->ch_cntxt_base_reg +
2776 CNTXT_6_RING_WP_LSB;
2777 gpii_chan->ch_cmd_reg = gpi_dev->regs +
2778 GPI_GPII_n_CH_CMD_OFFS(i);
2779
2780 /* vchan setup */
2781 vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
2782 gpii_chan->vc.desc_free = gpi_desc_free;
2783 gpii_chan->chid = chan;
2784 gpii_chan->gpii = gpii;
2785 gpii_chan->dir = GPII_CHAN_DIR[chan];
2786 }
2787 mutex_init(&gpii->ctrl_lock);
2788 rwlock_init(&gpii->pm_lock);
2789 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2790 (unsigned long)gpii);
2791 init_completion(&gpii->cmd_completion);
2792 gpii->gpii_id = i;
2793 gpii->regs = gpi_dev->regs;
2794 gpii->gpi_dev = gpi_dev;
2795 atomic_set(&gpii->dbg_index, 0);
2796 }
2797
2798 platform_set_drvdata(pdev, gpi_dev);
2799
2800	/* clear and set capabilities */
2801 dma_cap_zero(gpi_dev->dma_device.cap_mask);
2802 dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2803
2804 /* configure dmaengine apis */
2805 gpi_dev->dma_device.directions =
2806 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2807 gpi_dev->dma_device.residue_granularity =
2808 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2809 gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2810 gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2811 gpi_dev->dma_device.device_alloc_chan_resources =
2812 gpi_alloc_chan_resources;
2813 gpi_dev->dma_device.device_free_chan_resources =
2814 gpi_free_chan_resources;
2815 gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2816 gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2817 gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2818 gpi_dev->dma_device.device_config = gpi_config;
2819 gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2820 gpi_dev->dma_device.dev = gpi_dev->dev;
2821 gpi_dev->dma_device.device_pause = gpi_pause;
2822 gpi_dev->dma_device.device_resume = gpi_resume;
2823
2824 /* register with dmaengine framework */
2825 ret = dma_async_device_register(&gpi_dev->dma_device);
2826 if (ret) {
2827		GPI_ERR(gpi_dev, "async_device_register failed ret:%d\n", ret);
2828 return ret;
2829 }
2830
2831 ret = of_dma_controller_register(gpi_dev->dev->of_node,
2832 gpi_of_dma_xlate, gpi_dev);
2833 if (ret) {
2834		GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d\n", ret);
2835 return ret;
2836 }
2837
2838 /* setup debug capabilities */
2839 gpi_setup_debug(gpi_dev);
2840 GPI_LOG(gpi_dev, "probe success\n");
2841
2842 return ret;
2843}
2844
2845static const struct of_device_id gpi_of_match[] = {
2846 { .compatible = "qcom,gpi-dma" },
2847 {}
2848};
2849MODULE_DEVICE_TABLE(of, gpi_of_match);
2850
2851static struct platform_driver gpi_driver = {
2852 .probe = gpi_probe,
2853 .driver = {
2854 .name = GPI_DMA_DRV_NAME,
2855 .of_match_table = gpi_of_match,
2856 },
2857};
2858
2859static int __init gpi_init(void)
2860{
2861 pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
2862 return platform_driver_register(&gpi_driver);
2863}
2864module_init(gpi_init)
2865
2866MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2867MODULE_LICENSE("GPL v2");