blob: 94a8e6a8b2ae189530d91674d1df36232622ed69 [file] [log] [blame]
Sujeev Diasdd66ce02016-09-07 11:35:11 -07001/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <asm/dma-iommu.h>
14#include <linux/atomic.h>
15#include <linux/completion.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/io.h>
21#include <linux/iommu.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/ipc_logging.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_address.h>
29#include <linux/of_dma.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/scatterlist.h>
33#include <linux/sched_clock.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <asm/cacheflush.h>
37#include <linux/msm_gpi.h>
38#include "../dmaengine.h"
39#include "../virt-dma.h"
40#include "msm_gpi_mmio.h"
41
42/* global logging macros */
43#define GPI_LOG(gpi_dev, fmt, ...) do { \
44 if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
45 dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
46 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
47 ipc_log_string(gpi_dev->ilctxt, \
48 "%s: " fmt, __func__, ##__VA_ARGS__); \
49 } while (0)
50#define GPI_ERR(gpi_dev, fmt, ...) do { \
51 if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
52 dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
53 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
54 ipc_log_string(gpi_dev->ilctxt, \
55 "%s: " fmt, __func__, ##__VA_ARGS__); \
56 } while (0)
57
58/* gpii specific logging macros */
59#define GPII_REG(gpii, ch, fmt, ...) do { \
60 if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
61 pr_info("%s:%u:%s: " fmt, gpii->label, \
62 ch, __func__, ##__VA_ARGS__); \
63 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
64 ipc_log_string(gpii->ilctxt, \
65 "ch:%u %s: " fmt, ch, \
66 __func__, ##__VA_ARGS__); \
67 } while (0)
68#define GPII_VERB(gpii, ch, fmt, ...) do { \
69 if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
70 pr_info("%s:%u:%s: " fmt, gpii->label, \
71 ch, __func__, ##__VA_ARGS__); \
72 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
73 ipc_log_string(gpii->ilctxt, \
74 "ch:%u %s: " fmt, ch, \
75 __func__, ##__VA_ARGS__); \
76 } while (0)
77#define GPII_INFO(gpii, ch, fmt, ...) do { \
78 if (gpii->klog_lvl >= LOG_LVL_INFO) \
79 pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
80 __func__, ##__VA_ARGS__); \
81 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
82 ipc_log_string(gpii->ilctxt, \
83 "ch:%u %s: " fmt, ch, \
84 __func__, ##__VA_ARGS__); \
85 } while (0)
86#define GPII_ERR(gpii, ch, fmt, ...) do { \
87 if (gpii->klog_lvl >= LOG_LVL_ERROR) \
88 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
89 __func__, ##__VA_ARGS__); \
90 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
91 ipc_log_string(gpii->ilctxt, \
92 "ch:%u %s: " fmt, ch, \
93 __func__, ##__VA_ARGS__); \
94 } while (0)
95#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
96 if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
97 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
98 __func__, ##__VA_ARGS__); \
99 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
100 ipc_log_string(gpii->ilctxt, \
101 "ch:%u %s: " fmt, ch, \
102 __func__, ##__VA_ARGS__); \
103 } while (0)
104
105enum DEBUG_LOG_LVL {
106 LOG_LVL_MASK_ALL,
107 LOG_LVL_CRITICAL,
108 LOG_LVL_ERROR,
109 LOG_LVL_INFO,
110 LOG_LVL_VERBOSE,
111 LOG_LVL_REG_ACCESS,
112};
113
114enum EV_PRIORITY {
115 EV_PRIORITY_ISR,
116 EV_PRIORITY_TASKLET,
117};
118
119#define GPI_DMA_DRV_NAME "gpi_dma"
120#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
121#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
122#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
123#define IPC_LOG_PAGES (40)
124#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
125#else
126#define IPC_LOG_PAGES (2)
127#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
128#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
129#endif
130
131#define GPI_LABEL_SIZE (256)
132#define GPI_DBG_COMMON (99)
133#define MAX_CHANNELS_PER_GPII (2)
Sujeev Diasdfe09e12017-08-31 18:31:04 -0700134#define GPI_TX_CHAN (0)
135#define GPI_RX_CHAN (1)
Sujeev Diasdd66ce02016-09-07 11:35:11 -0700136#define CMD_TIMEOUT_MS (50)
137#define STATE_IGNORE (U32_MAX)
Sujeev Diasdfe09e12017-08-31 18:31:04 -0700138#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
Sujeev Diasdd66ce02016-09-07 11:35:11 -0700139
140struct __packed gpi_error_log_entry {
141 u32 routine : 4;
142 u32 type : 4;
143 u32 reserved0 : 4;
144 u32 code : 4;
145 u32 reserved1 : 3;
146 u32 chid : 5;
147 u32 reserved2 : 1;
148 u32 chtype : 1;
149 u32 ee : 1;
150};
151
152struct __packed xfer_compl_event {
153 u64 ptr;
154 u32 length : 24;
155 u8 code;
156 u16 status;
157 u8 type;
158 u8 chid;
159};
160
161struct __packed immediate_data_event {
162 u8 data_bytes[8];
163 u8 length : 4;
164 u8 resvd : 4;
165 u16 tre_index;
166 u8 code;
167 u16 status;
168 u8 type;
169 u8 chid;
170};
171
172struct __packed qup_notif_event {
173 u32 status;
174 u32 time;
175 u32 count :24;
176 u8 resvd;
177 u16 resvd1;
178 u8 type;
179 u8 chid;
180};
181
182struct __packed gpi_ere {
183 u32 dword[4];
184};
185
186enum GPI_EV_TYPE {
187 XFER_COMPLETE_EV_TYPE = 0x22,
188 IMMEDIATE_DATA_EV_TYPE = 0x30,
189 QUP_NOTIF_EV_TYPE = 0x31,
190 STALE_EV_TYPE = 0xFF,
191};
192
193union __packed gpi_event {
194 struct __packed xfer_compl_event xfer_compl_event;
195 struct __packed immediate_data_event immediate_data_event;
196 struct __packed qup_notif_event qup_notif_event;
197 struct __packed gpi_ere gpi_ere;
198};
199
200enum gpii_irq_settings {
201 DEFAULT_IRQ_SETTINGS,
202 MASK_IEOB_SETTINGS,
203};
204
205enum gpi_ev_state {
206 DEFAULT_EV_CH_STATE = 0,
207 EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
208 EV_STATE_ALLOCATED,
209 MAX_EV_STATES
210};
211
212static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
213 [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
214 [EV_STATE_ALLOCATED] = "ALLOCATED",
215};
216
217#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
218 "INVALID" : gpi_ev_state_str[state])
219
220enum gpi_ch_state {
221 DEFAULT_CH_STATE = 0x0,
222 CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
223 CH_STATE_ALLOCATED = 0x1,
224 CH_STATE_STARTED = 0x2,
225 CH_STATE_STOPPED = 0x3,
226 CH_STATE_STOP_IN_PROC = 0x4,
227 CH_STATE_ERROR = 0xf,
228 MAX_CH_STATES
229};
230
231static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
232 [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
233 [CH_STATE_ALLOCATED] = "ALLOCATED",
234 [CH_STATE_STARTED] = "STARTED",
235 [CH_STATE_STOPPED] = "STOPPED",
236 [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
237 [CH_STATE_ERROR] = "ERROR",
238};
239
240#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
241 "INVALID" : gpi_ch_state_str[state])
242
243enum gpi_cmd {
244 GPI_CH_CMD_BEGIN,
245 GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
246 GPI_CH_CMD_START,
247 GPI_CH_CMD_STOP,
248 GPI_CH_CMD_RESET,
249 GPI_CH_CMD_DE_ALLOC,
250 GPI_CH_CMD_UART_SW_STALE,
251 GPI_CH_CMD_UART_RFR_READY,
252 GPI_CH_CMD_UART_RFR_NOT_READY,
253 GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
254 GPI_EV_CMD_BEGIN,
255 GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
256 GPI_EV_CMD_RESET,
257 GPI_EV_CMD_DEALLOC,
258 GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
259 GPI_MAX_CMD,
260};
261
262#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
263
264static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
265 [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
266 [GPI_CH_CMD_START] = "CH START",
267 [GPI_CH_CMD_STOP] = "CH STOP",
268 [GPI_CH_CMD_RESET] = "CH_RESET",
269 [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
270 [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
271 [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
272 [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
273 [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
274 [GPI_EV_CMD_RESET] = "EV RESET",
275 [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
276};
277
278#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
279 gpi_cmd_str[cmd])
280
281static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
282 [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
283 [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
284 [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
285 [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
286 [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
287 [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
288};
289
290#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
291 "INVALID" : gpi_cb_event_str[event])
292
293enum se_protocol {
294 SE_PROTOCOL_SPI = 1,
295 SE_PROTOCOL_UART = 2,
296 SE_PROTOCOL_I2C = 3,
297 SE_MAX_PROTOCOL
298};
299
300/*
301 * @DISABLE_STATE: no register access allowed
302 * @CONFIG_STATE: client has configured the channel
303 * @PREP_HARDWARE: register access is allowed
304 * however, no processing EVENTS
305 * @ACTIVE_STATE: channels are fully operational
306 * @PREPARE_TERIMNATE: graceful termination of channels
307 * register access is allowed
308 * @PAUSE_STATE: channels are active, but not processing any events
309 */
310enum gpi_pm_state {
311 DISABLE_STATE,
312 CONFIG_STATE,
313 PREPARE_HARDWARE,
314 ACTIVE_STATE,
315 PREPARE_TERMINATE,
316 PAUSE_STATE,
317 MAX_PM_STATE
318};
319
320#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
321
322static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
323 [DISABLE_STATE] = "DISABLE",
324 [CONFIG_STATE] = "CONFIG",
325 [PREPARE_HARDWARE] = "PREPARE HARDWARE",
326 [ACTIVE_STATE] = "ACTIVE",
327 [PREPARE_TERMINATE] = "PREPARE TERMINATE",
328 [PAUSE_STATE] = "PAUSE",
329};
330
331#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
332 "INVALID" : gpi_pm_state_str[state])
333
334static const struct {
335 enum gpi_cmd gpi_cmd;
336 u32 opcode;
337 u32 state;
338 u32 timeout_ms;
339} gpi_cmd_info[GPI_MAX_CMD] = {
340 {
341 GPI_CH_CMD_ALLOCATE,
342 GPI_GPII_n_CH_CMD_ALLOCATE,
343 CH_STATE_ALLOCATED,
344 CMD_TIMEOUT_MS,
345 },
346 {
347 GPI_CH_CMD_START,
348 GPI_GPII_n_CH_CMD_START,
349 CH_STATE_STARTED,
350 CMD_TIMEOUT_MS,
351 },
352 {
353 GPI_CH_CMD_STOP,
354 GPI_GPII_n_CH_CMD_STOP,
355 CH_STATE_STOPPED,
356 CMD_TIMEOUT_MS,
357 },
358 {
359 GPI_CH_CMD_RESET,
360 GPI_GPII_n_CH_CMD_RESET,
361 CH_STATE_ALLOCATED,
362 CMD_TIMEOUT_MS,
363 },
364 {
365 GPI_CH_CMD_DE_ALLOC,
366 GPI_GPII_n_CH_CMD_DE_ALLOC,
367 CH_STATE_NOT_ALLOCATED,
368 CMD_TIMEOUT_MS,
369 },
370 {
371 GPI_CH_CMD_UART_SW_STALE,
372 GPI_GPII_n_CH_CMD_UART_SW_STALE,
373 STATE_IGNORE,
374 CMD_TIMEOUT_MS,
375 },
376 {
377 GPI_CH_CMD_UART_RFR_READY,
378 GPI_GPII_n_CH_CMD_UART_RFR_READY,
379 STATE_IGNORE,
380 CMD_TIMEOUT_MS,
381 },
382 {
383 GPI_CH_CMD_UART_RFR_NOT_READY,
384 GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
385 STATE_IGNORE,
386 CMD_TIMEOUT_MS,
387 },
388 {
389 GPI_EV_CMD_ALLOCATE,
390 GPI_GPII_n_EV_CH_CMD_ALLOCATE,
391 EV_STATE_ALLOCATED,
392 CMD_TIMEOUT_MS,
393 },
394 {
395 GPI_EV_CMD_RESET,
396 GPI_GPII_n_EV_CH_CMD_RESET,
397 EV_STATE_ALLOCATED,
398 CMD_TIMEOUT_MS,
399 },
400 {
401 GPI_EV_CMD_DEALLOC,
402 GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
403 EV_STATE_NOT_ALLOCATED,
404 CMD_TIMEOUT_MS,
405 },
406};
407
408struct gpi_ring {
409 void *pre_aligned;
410 size_t alloc_size;
411 phys_addr_t phys_addr;
412 dma_addr_t dma_handle;
413 void *base;
414 void *wp;
415 void *rp;
416 u32 len;
417 u32 el_size;
418 u32 elements;
419 bool configured;
420};
421
422struct sg_tre {
423 void *ptr;
424 void *wp; /* store chan wp for debugging */
425};
426
427struct gpi_dbg_log {
428 void *addr;
429 u64 time;
430 u32 val;
431 bool read;
432};
433
434struct gpi_dev {
435 struct dma_device dma_device;
436 struct device *dev;
437 struct resource *res;
438 void __iomem *regs;
439 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
440 u32 gpii_mask; /* gpii instances available for apps */
441 u32 ev_factor; /* ev ring length factor */
Sujeev Dias69484212017-08-31 10:06:53 -0700442 u32 smmu_cfg;
443 dma_addr_t iova_base;
444 size_t iova_size;
Sujeev Diasdd66ce02016-09-07 11:35:11 -0700445 struct gpii *gpiis;
446 void *ilctxt;
447 u32 ipc_log_lvl;
448 u32 klog_lvl;
449 struct dentry *dentry;
450};
451
452struct gpii_chan {
453 struct virt_dma_chan vc;
454 u32 chid;
455 u32 seid;
456 enum se_protocol protocol;
457 enum EV_PRIORITY priority; /* comes from clients DT node */
458 struct gpii *gpii;
459 enum gpi_ch_state ch_state;
460 enum gpi_pm_state pm_state;
461 void __iomem *ch_cntxt_base_reg;
462 void __iomem *ch_cntxt_db_reg;
463 void __iomem *ch_ring_base_lsb_reg,
464 *ch_ring_rp_lsb_reg,
465 *ch_ring_wp_lsb_reg;
466 void __iomem *ch_cmd_reg;
467 u32 req_tres; /* # of tre's client requested */
468 u32 dir;
469 struct gpi_ring ch_ring;
470 struct gpi_ring sg_ring; /* points to client scatterlist */
471 struct gpi_client_info client_info;
472};
473
474struct gpii {
475 u32 gpii_id;
476 struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
477 struct gpi_dev *gpi_dev;
478 enum EV_PRIORITY ev_priority;
479 enum se_protocol protocol;
480 int irq;
481 void __iomem *regs; /* points to gpi top */
482 void __iomem *ev_cntxt_base_reg;
483 void __iomem *ev_cntxt_db_reg;
484 void __iomem *ev_ring_base_lsb_reg,
485 *ev_ring_rp_lsb_reg,
486 *ev_ring_wp_lsb_reg;
487 void __iomem *ev_cmd_reg;
488 void __iomem *ieob_src_reg;
489 void __iomem *ieob_clr_reg;
490 struct mutex ctrl_lock;
491 enum gpi_ev_state ev_state;
492 bool configured_irq;
493 enum gpi_pm_state pm_state;
494 rwlock_t pm_lock;
495 struct gpi_ring ev_ring;
496 struct tasklet_struct ev_task; /* event processing tasklet */
497 struct completion cmd_completion;
498 enum gpi_cmd gpi_cmd;
499 u32 cntxt_type_irq_msk;
500 void *ilctxt;
501 u32 ipc_log_lvl;
502 u32 klog_lvl;
503 struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
504 atomic_t dbg_index;
505 char label[GPI_LABEL_SIZE];
506 struct dentry *dentry;
507};
508
509struct gpi_desc {
510 struct virt_dma_desc vd;
511 void *wp; /* points to TRE last queued during issue_pending */
512 struct sg_tre *sg_tre; /* points to last scatterlist */
513 void *db; /* DB register to program */
514 struct gpii_chan *gpii_chan;
515};
516
Sujeev Dias69484212017-08-31 10:06:53 -0700517#define GPI_SMMU_ATTACH BIT(0)
518#define GPI_SMMU_S1_BYPASS BIT(1)
519#define GPI_SMMU_FAST BIT(2)
520#define GPI_SMMU_ATOMIC BIT(3)
521
Sujeev Diasdd66ce02016-09-07 11:35:11 -0700522const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
523 GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
524};
525
526struct dentry *pdentry;
527static irqreturn_t gpi_handle_irq(int irq, void *data);
528static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
529static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
530static void gpi_process_events(struct gpii *gpii);
531
532static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
533{
534 return container_of(dma_chan, struct gpii_chan, vc.chan);
535}
536
537static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
538{
539 return container_of(vd, struct gpi_desc, vd);
540}
541
542static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
543 void *addr)
544{
545 return ring->phys_addr + (addr - ring->base);
546}
547
548static inline void *to_virtual(const struct gpi_ring *const ring,
549 phys_addr_t addr)
550{
551 return ring->base + (addr - ring->phys_addr);
552}
553
554#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
555static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
556{
557 u64 time = sched_clock();
558 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
559 u32 val;
560
561 val = readl_relaxed(addr);
562 index &= (GPI_DBG_LOG_SIZE - 1);
563 (gpii->dbg_log + index)->addr = addr;
564 (gpii->dbg_log + index)->time = time;
565 (gpii->dbg_log + index)->val = val;
566 (gpii->dbg_log + index)->read = true;
567 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
568 addr - gpii->regs, val);
569 return val;
570}
571static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
572{
573 u64 time = sched_clock();
574 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
575
576 index &= (GPI_DBG_LOG_SIZE - 1);
577 (gpii->dbg_log + index)->addr = addr;
578 (gpii->dbg_log + index)->time = time;
579 (gpii->dbg_log + index)->val = val;
580 (gpii->dbg_log + index)->read = false;
581
582 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
583 addr - gpii->regs, val);
584 writel_relaxed(val, addr);
585}
586#else
587static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
588{
589 u32 val = readl_relaxed(addr);
590
591 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
592 addr - gpii->regs, val);
593 return val;
594}
595static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
596{
597 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
598 addr - gpii->regs, val);
599 writel_relaxed(val, addr);
600}
601#endif
602
603/* gpi_write_reg_field - write to specific bit field */
604static inline void gpi_write_reg_field(struct gpii *gpii,
605 void __iomem *addr,
606 u32 mask,
607 u32 shift,
608 u32 val)
609{
610 u32 tmp = gpi_read_reg(gpii, addr);
611
612 tmp &= ~mask;
613 val = tmp | ((val << shift) & mask);
614 gpi_write_reg(gpii, addr, val);
615}
616
617static void gpi_disable_interrupts(struct gpii *gpii)
618{
619 struct {
620 u32 offset;
621 u32 mask;
622 u32 shift;
623 u32 val;
624 } default_reg[] = {
625 {
626 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
627 (gpii->gpii_id),
628 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
629 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
630 0,
631 },
632 {
633 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
634 (gpii->gpii_id),
635 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
636 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
637 0,
638 },
639 {
640 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
641 (gpii->gpii_id),
642 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
643 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
644 0,
645 },
646 {
647 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
648 (gpii->gpii_id),
649 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
650 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
651 0,
652 },
653 {
654 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
655 (gpii->gpii_id),
656 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
657 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
658 0,
659 },
660 {
661 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
662 (gpii->gpii_id),
663 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
664 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
665 0,
666 },
667 {
668 GPI_GPII_n_CNTXT_INTSET_OFFS
669 (gpii->gpii_id),
670 GPI_GPII_n_CNTXT_INTSET_BMSK,
671 GPI_GPII_n_CNTXT_INTSET_SHFT,
672 0,
673 },
674 { 0 },
675 };
676 int i;
677
678 for (i = 0; default_reg[i].offset; i++)
679 gpi_write_reg_field(gpii, gpii->regs +
680 default_reg[i].offset,
681 default_reg[i].mask,
682 default_reg[i].shift,
683 default_reg[i].val);
684 gpii->cntxt_type_irq_msk = 0;
685 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
686 gpii->configured_irq = false;
687}
688
689/* configure and enable interrupts */
690static int gpi_config_interrupts(struct gpii *gpii,
691 enum gpii_irq_settings settings,
692 bool mask)
693{
694 int ret;
695 int i;
696 const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
697 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
698 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
699 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
700 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
701 struct {
702 u32 offset;
703 u32 mask;
704 u32 shift;
705 u32 val;
706 } default_reg[] = {
707 {
708 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
709 (gpii->gpii_id),
710 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
711 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
712 def_type,
713 },
714 {
715 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
716 (gpii->gpii_id),
717 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
718 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
719 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
720 },
721 {
722 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
723 (gpii->gpii_id),
724 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
725 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
726 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
727 },
728 {
729 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
730 (gpii->gpii_id),
731 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
732 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
733 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
734 },
735 {
736 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
737 (gpii->gpii_id),
738 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
739 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
740 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
741 },
742 {
743 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
744 (gpii->gpii_id),
745 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
746 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
747 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
748 },
749 {
750 GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
751 (gpii->gpii_id),
752 U32_MAX,
753 0,
754 0x0,
755 },
756 {
757 GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
758 (gpii->gpii_id),
759 U32_MAX,
760 0,
761 0x0,
762 },
763 {
764 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
765 (gpii->gpii_id),
766 U32_MAX,
767 0,
768 0x0,
769 },
770 {
771 GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
772 (gpii->gpii_id),
773 U32_MAX,
774 0,
775 0x0,
776 },
777 {
778 GPI_GPII_n_CNTXT_INTSET_OFFS
779 (gpii->gpii_id),
780 GPI_GPII_n_CNTXT_INTSET_BMSK,
781 GPI_GPII_n_CNTXT_INTSET_SHFT,
782 0x01,
783 },
784 {
785 GPI_GPII_n_ERROR_LOG_OFFS
786 (gpii->gpii_id),
787 U32_MAX,
788 0,
789 0x00,
790 },
791 { 0 },
792 };
793
794 GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
795 (gpii->configured_irq) ? 'F' : 'T',
796 (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
797 (mask) ? 'T' : 'F');
798
799 if (gpii->configured_irq == false) {
800 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
801 gpi_handle_irq, IRQF_TRIGGER_HIGH,
802 gpii->label, gpii);
803 if (ret < 0) {
804 GPII_CRITIC(gpii, GPI_DBG_COMMON,
805 "error request irq:%d ret:%d\n",
806 gpii->irq, ret);
807 return ret;
808 }
809 }
810
811 if (settings == MASK_IEOB_SETTINGS) {
812 /*
813 * GPII only uses one EV ring per gpii so we can globally
814 * enable/disable IEOB interrupt
815 */
816 if (mask)
817 gpii->cntxt_type_irq_msk |=
818 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
819 else
820 gpii->cntxt_type_irq_msk &=
821 ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
822 gpi_write_reg_field(gpii, gpii->regs +
823 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
824 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
825 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
826 gpii->cntxt_type_irq_msk);
827 } else {
828 for (i = 0; default_reg[i].offset; i++)
829 gpi_write_reg_field(gpii, gpii->regs +
830 default_reg[i].offset,
831 default_reg[i].mask,
832 default_reg[i].shift,
833 default_reg[i].val);
834 gpii->cntxt_type_irq_msk = def_type;
835 };
836
837 gpii->configured_irq = true;
838
839 return 0;
840}
841
842/* Sends gpii event or channel command */
843static int gpi_send_cmd(struct gpii *gpii,
844 struct gpii_chan *gpii_chan,
845 enum gpi_cmd gpi_cmd)
846{
847 u32 chid = MAX_CHANNELS_PER_GPII;
848 u32 cmd;
849 unsigned long timeout;
850 void __iomem *cmd_reg;
851
852 if (gpi_cmd >= GPI_MAX_CMD)
853 return -EINVAL;
854 if (IS_CHAN_CMD(gpi_cmd))
855 chid = gpii_chan->chid;
856
857 GPII_INFO(gpii, chid,
858 "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
859
860 /* send opcode and wait for completion */
861 reinit_completion(&gpii->cmd_completion);
862 gpii->gpi_cmd = gpi_cmd;
863
864 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
865 gpii->ev_cmd_reg;
866 cmd = IS_CHAN_CMD(gpi_cmd) ?
867 GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
868 GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
869 gpi_write_reg(gpii, cmd_reg, cmd);
870 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
871 msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
872
873 if (!timeout) {
874 GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
875 TO_GPI_CMD_STR(gpi_cmd));
876 return -EIO;
877 }
878
879 /* confirm new ch state is correct , if the cmd is a state change cmd */
880 if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
881 return 0;
882 if (IS_CHAN_CMD(gpi_cmd) &&
883 gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
884 return 0;
885 if (!IS_CHAN_CMD(gpi_cmd) &&
886 gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
887 return 0;
888
889 return -EIO;
890}
891
892/* program transfer ring DB register */
893static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
894 struct gpi_ring *ring,
895 void *wp)
896{
897 struct gpii *gpii = gpii_chan->gpii;
898 phys_addr_t p_wp;
899
900 p_wp = to_physical(ring, wp);
901 gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
902}
903
904/* program event ring DB register */
905static inline void gpi_write_ev_db(struct gpii *gpii,
906 struct gpi_ring *ring,
907 void *wp)
908{
909 phys_addr_t p_wp;
910
911 p_wp = ring->phys_addr + (wp - ring->base);
912 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
913}
914
915/* notify client with generic event */
916static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
917 enum msm_gpi_cb_event event,
918 u64 status)
919{
920 struct gpii *gpii = gpii_chan->gpii;
921 struct gpi_client_info *client_info = &gpii_chan->client_info;
922 struct msm_gpi_cb msm_gpi_cb = {0};
923
924 GPII_ERR(gpii, gpii_chan->chid,
925 "notifying event:%s with status:%llu\n",
926 TO_GPI_CB_EVENT_STR(event), status);
927
928 msm_gpi_cb.cb_event = event;
929 msm_gpi_cb.status = status;
930 msm_gpi_cb.timestamp = sched_clock();
931 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
932 client_info->cb_param);
933}
934
935/* process transfer completion interrupt */
936static void gpi_process_ieob(struct gpii *gpii)
937{
938 u32 ieob_irq;
939
940 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
941 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
942 GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
943
944 /* process events based on priority */
945 if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
946 GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
947 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
948 tasklet_schedule(&gpii->ev_task);
949 } else {
950 GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
951 gpi_process_events(gpii);
952 }
953}
954
955/* process channel control interrupt */
956static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
957{
958 u32 gpii_id = gpii->gpii_id;
959 u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
960 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
961 u32 chid;
962 struct gpii_chan *gpii_chan;
963 u32 state;
964
965 /* clear the status */
966 offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
967 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
968
969 for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
970 if (!(BIT(chid) & ch_irq))
971 continue;
972
973 gpii_chan = &gpii->gpii_chan[chid];
974 GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
975 state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
976 CNTXT_0_CONFIG);
977 state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
978 GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
979
980 /*
981 * CH_CMD_DEALLOC cmd always successful. However cmd does
982 * not change hardware status. So overwriting software state
983 * to default state.
984 */
985 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
986 state = DEFAULT_CH_STATE;
987 gpii_chan->ch_state = state;
988 GPII_VERB(gpii, chid, "setting channel to state:%s\n",
989 TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
990
991 /*
992 * Triggering complete all if ch_state is not a stop in process.
993 * Stop in process is a transition state and we will wait for
994 * stop interrupt before notifying.
995 */
996 if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
997 complete_all(&gpii->cmd_completion);
998
999 /* notifying clients if in error state */
1000 if (gpii_chan->ch_state == CH_STATE_ERROR)
1001 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
1002 __LINE__);
1003 }
1004}
1005
1006/* processing gpi level error interrupts */
1007static void gpi_process_glob_err_irq(struct gpii *gpii)
1008{
1009 u32 gpii_id = gpii->gpii_id;
1010 u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
1011 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
1012 u32 error_log;
1013 u32 chid;
1014 struct gpii_chan *gpii_chan;
1015 struct gpi_client_info *client_info;
1016 struct msm_gpi_cb msm_gpi_cb;
1017 struct gpi_error_log_entry *log_entry =
1018 (struct gpi_error_log_entry *)&error_log;
1019
1020 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
1021 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
1022
1023 /* only error interrupt should be set */
1024 if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
1025 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
1026 irq_stts);
1027 goto error_irq;
1028 }
1029
1030 offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
1031 error_log = gpi_read_reg(gpii, gpii->regs + offset);
1032 gpi_write_reg(gpii, gpii->regs + offset, 0);
1033
1034 /* get channel info */
1035 chid = ((struct gpi_error_log_entry *)&error_log)->chid;
1036 if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
1037 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
1038 chid);
1039 goto error_irq;
1040 }
1041
1042 gpii_chan = &gpii->gpii_chan[chid];
1043 client_info = &gpii_chan->client_info;
1044
1045 /* notify client with error log */
1046 msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
1047 msm_gpi_cb.error_log.routine = log_entry->routine;
1048 msm_gpi_cb.error_log.type = log_entry->type;
1049 msm_gpi_cb.error_log.error_code = log_entry->code;
1050 GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
1051 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1052 GPII_ERR(gpii, gpii_chan->chid,
1053 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
1054 log_entry->ee, log_entry->chtype,
1055 msm_gpi_cb.error_log.routine,
1056 msm_gpi_cb.error_log.type,
1057 msm_gpi_cb.error_log.error_code);
1058 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1059 client_info->cb_param);
1060
1061 return;
1062
1063error_irq:
1064 for (chid = 0, gpii_chan = gpii->gpii_chan;
1065 chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
1066 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
1067 irq_stts);
1068}
1069
1070/* gpii interrupt handler */
1071static irqreturn_t gpi_handle_irq(int irq, void *data)
1072{
1073 struct gpii *gpii = data;
1074 u32 type;
1075 unsigned long flags;
1076 u32 offset;
1077 u32 gpii_id = gpii->gpii_id;
1078
1079 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1080
1081 read_lock_irqsave(&gpii->pm_lock, flags);
1082
1083 /*
1084 * States are out of sync to receive interrupt
1085 * while software state is in DISABLE state, bailing out.
1086 */
1087 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1088 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1089 "receive interrupt while in %s state\n",
1090 TO_GPI_PM_STR(gpii->pm_state));
1091 goto exit_irq;
1092 }
1093
1094 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1095 type = gpi_read_reg(gpii, gpii->regs + offset);
1096
1097 do {
1098 GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
1099 type);
1100 /* global gpii error */
1101 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
1102 GPII_ERR(gpii, GPI_DBG_COMMON,
1103 "processing global error irq\n");
1104 gpi_process_glob_err_irq(gpii);
1105 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
1106 }
1107
1108 /* event control irq */
1109 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
1110 u32 ev_state;
1111 u32 ev_ch_irq;
1112
1113 GPII_INFO(gpii, GPI_DBG_COMMON,
1114 "processing EV CTRL interrupt\n");
1115 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
1116 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
1117
1118 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
1119 (gpii_id);
1120 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
1121 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
1122 CNTXT_0_CONFIG);
1123 ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
1124 ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
1125
1126 /*
1127 * CMD EV_CMD_DEALLOC is always successful. However
1128 * cmd does not change hardware status. So overwriting
1129 * software state to default state.
1130 */
1131 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
1132 ev_state = DEFAULT_EV_CH_STATE;
1133
1134 gpii->ev_state = ev_state;
1135 GPII_INFO(gpii, GPI_DBG_COMMON,
1136 "setting EV state to %s\n",
1137 TO_GPI_EV_STATE_STR(gpii->ev_state));
1138 complete_all(&gpii->cmd_completion);
1139 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
1140 }
1141
1142 /* channel control irq */
1143 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
1144 GPII_INFO(gpii, GPI_DBG_COMMON,
1145 "process CH CTRL interrupts\n");
1146 gpi_process_ch_ctrl_irq(gpii);
1147 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
1148 }
1149
1150 /* transfer complete interrupt */
1151 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
1152 GPII_VERB(gpii, GPI_DBG_COMMON,
1153 "process IEOB interrupts\n");
1154 gpi_process_ieob(gpii);
1155 type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
1156 }
1157
1158 if (type) {
1159 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1160 "Unhandled interrupt status:0x%x\n", type);
1161 goto exit_irq;
1162 }
1163 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1164 type = gpi_read_reg(gpii, gpii->regs + offset);
1165 } while (type);
1166
1167exit_irq:
1168 read_unlock_irqrestore(&gpii->pm_lock, flags);
1169 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1170
1171 return IRQ_HANDLED;
1172}
1173
1174/* process qup notification events */
1175static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
1176 struct qup_notif_event *notif_event)
1177{
1178 struct gpii *gpii = gpii_chan->gpii;
1179 struct gpi_client_info *client_info = &gpii_chan->client_info;
1180 struct msm_gpi_cb msm_gpi_cb;
1181
1182 GPII_VERB(gpii, gpii_chan->chid,
1183 "status:0x%x time:0x%x count:0x%x\n",
1184 notif_event->status, notif_event->time, notif_event->count);
1185
1186 msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
1187 msm_gpi_cb.status = notif_event->status;
1188 msm_gpi_cb.timestamp = notif_event->time;
1189 msm_gpi_cb.count = notif_event->count;
1190 GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
1191 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1192 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1193 client_info->cb_param);
1194}
1195
1196/* process DMA Immediate completion data events */
1197static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
1198 struct immediate_data_event *imed_event)
1199{
1200 struct gpii *gpii = gpii_chan->gpii;
1201 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1202 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1203 struct virt_dma_desc *vd;
1204 struct gpi_desc *gpi_desc;
1205 struct msm_gpi_tre *client_tre;
1206 void *sg_tre;
1207 void *tre = ch_ring->base +
1208 (ch_ring->el_size * imed_event->tre_index);
1209 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1210
1211 /*
1212 * If channel not active don't process event but let
1213 * client know pending event is available
1214 */
1215 if (gpii_chan->pm_state != ACTIVE_STATE) {
1216 GPII_ERR(gpii, gpii_chan->chid,
1217 "skipping processing event because ch @ %s state\n",
1218 TO_GPI_PM_STR(gpii_chan->pm_state));
1219 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1220 __LINE__);
1221 return;
1222 }
1223
1224 spin_lock_irq(&gpii_chan->vc.lock);
1225 vd = vchan_next_desc(&gpii_chan->vc);
1226 if (!vd) {
1227 struct gpi_ere *gpi_ere;
1228 struct msm_gpi_tre *gpi_tre;
1229
1230 spin_unlock_irq(&gpii_chan->vc.lock);
1231 GPII_ERR(gpii, gpii_chan->chid,
1232 "event without a pending descriptor!\n");
1233 gpi_ere = (struct gpi_ere *)imed_event;
1234 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1235 gpi_ere->dword[0], gpi_ere->dword[1],
1236 gpi_ere->dword[2], gpi_ere->dword[3]);
1237 gpi_tre = tre;
1238 GPII_ERR(gpii, gpii_chan->chid,
1239 "Pending TRE: %08x %08x %08x %08x\n",
1240 gpi_tre->dword[0], gpi_tre->dword[1],
1241 gpi_tre->dword[2], gpi_tre->dword[3]);
1242 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1243 __LINE__);
1244 return;
1245 }
1246 gpi_desc = to_gpi_desc(vd);
1247
1248 /* Event TR RP gen. don't match descriptor TR */
1249 if (gpi_desc->wp != tre) {
1250 spin_unlock_irq(&gpii_chan->vc.lock);
1251 GPII_ERR(gpii, gpii_chan->chid,
1252 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1253 to_physical(ch_ring, gpi_desc->wp),
1254 to_physical(ch_ring, tre));
1255 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1256 __LINE__);
1257 return;
1258 }
1259
1260 list_del(&vd->node);
1261 spin_unlock_irq(&gpii_chan->vc.lock);
1262
1263 sg_tre = gpi_desc->sg_tre;
1264 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1265
1266 /*
1267 * RP pointed by Event is to last TRE processed,
1268 * we need to update ring rp to tre + 1
1269 */
1270 tre += ch_ring->el_size;
1271 if (tre >= (ch_ring->base + ch_ring->len))
1272 tre = ch_ring->base;
1273 ch_ring->rp = tre;
1274 sg_tre += sg_ring->el_size;
1275 if (sg_tre >= (sg_ring->base + sg_ring->len))
1276 sg_tre = sg_ring->base;
1277 sg_ring->rp = sg_tre;
1278
1279 /* make sure rp updates are immediately visible to all cores */
1280 smp_wmb();
1281
1282 /* update Immediate data from Event back in to TRE if it's RX channel */
1283 if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
1284 client_tre->dword[0] =
1285 ((struct msm_gpi_tre *)imed_event)->dword[0];
1286 client_tre->dword[1] =
1287 ((struct msm_gpi_tre *)imed_event)->dword[1];
1288 client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
1289 imed_event->length);
1290 }
1291
1292 tx_cb_param = vd->tx.callback_param;
1293 if (tx_cb_param) {
1294 GPII_VERB(gpii, gpii_chan->chid,
1295 "cb_length:%u compl_code:0x%x status:0x%x\n",
1296 imed_event->length, imed_event->code,
1297 imed_event->status);
1298 tx_cb_param->length = imed_event->length;
1299 tx_cb_param->completion_code = imed_event->code;
1300 tx_cb_param->status = imed_event->status;
1301 }
1302
1303 spin_lock_irq(&gpii_chan->vc.lock);
1304 vchan_cookie_complete(vd);
1305 spin_unlock_irq(&gpii_chan->vc.lock);
1306}
1307
1308/* processing transfer completion events */
1309static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
1310 struct xfer_compl_event *compl_event)
1311{
1312 struct gpii *gpii = gpii_chan->gpii;
1313 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1314 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1315 void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
1316 struct msm_gpi_tre *client_tre;
1317 struct virt_dma_desc *vd;
1318 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1319 struct gpi_desc *gpi_desc;
1320 void *sg_tre = NULL;
1321
1322 /* only process events on active channel */
1323 if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
1324 GPII_ERR(gpii, gpii_chan->chid,
1325 "skipping processing event because ch @ %s state\n",
1326 TO_GPI_PM_STR(gpii_chan->pm_state));
1327 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1328 __LINE__);
1329 return;
1330 }
1331
1332 spin_lock_irq(&gpii_chan->vc.lock);
1333 vd = vchan_next_desc(&gpii_chan->vc);
1334 if (!vd) {
1335 struct gpi_ere *gpi_ere;
1336
1337 spin_unlock_irq(&gpii_chan->vc.lock);
1338 GPII_ERR(gpii, gpii_chan->chid,
1339 "Event without a pending descriptor!\n");
1340 gpi_ere = (struct gpi_ere *)compl_event;
1341 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1342 gpi_ere->dword[0], gpi_ere->dword[1],
1343 gpi_ere->dword[2], gpi_ere->dword[3]);
1344 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1345 __LINE__);
1346 return;
1347 }
1348
1349 gpi_desc = to_gpi_desc(vd);
1350
1351 /* TRE Event generated didn't match descriptor's TRE */
1352 if (gpi_desc->wp != ev_rp) {
1353 spin_unlock_irq(&gpii_chan->vc.lock);
1354 GPII_ERR(gpii, gpii_chan->chid,
1355 "EOT\EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1356 to_physical(ch_ring, gpi_desc->wp),
1357 to_physical(ch_ring, ev_rp));
1358 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1359 __LINE__);
1360 return;
1361 }
1362
1363 list_del(&vd->node);
1364 spin_unlock_irq(&gpii_chan->vc.lock);
1365
1366 sg_tre = gpi_desc->sg_tre;
1367 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1368
1369 /*
1370 * RP pointed by Event is to last TRE processed,
1371 * we need to update ring rp to ev_rp + 1
1372 */
1373 ev_rp += ch_ring->el_size;
1374 if (ev_rp >= (ch_ring->base + ch_ring->len))
1375 ev_rp = ch_ring->base;
1376 ch_ring->rp = ev_rp;
1377 sg_tre += sg_ring->el_size;
1378 if (sg_tre >= (sg_ring->base + sg_ring->len))
1379 sg_tre = sg_ring->base;
1380 sg_ring->rp = sg_tre;
1381
1382 /* update must be visible to other cores */
1383 smp_wmb();
1384
1385 tx_cb_param = vd->tx.callback_param;
1386 if (tx_cb_param) {
1387 GPII_VERB(gpii, gpii_chan->chid,
1388 "cb_length:%u compl_code:0x%x status:0x%x\n",
1389 compl_event->length, compl_event->code,
1390 compl_event->status);
1391 tx_cb_param->length = compl_event->length;
1392 tx_cb_param->completion_code = compl_event->code;
1393 tx_cb_param->status = compl_event->status;
1394 }
1395
1396 spin_lock_irq(&gpii_chan->vc.lock);
1397 vchan_cookie_complete(vd);
1398 spin_unlock_irq(&gpii_chan->vc.lock);
1399}
1400
1401/* process all events */
1402static void gpi_process_events(struct gpii *gpii)
1403{
1404 struct gpi_ring *ev_ring = &gpii->ev_ring;
1405 u32 cntxt_rp, local_rp;
1406 union gpi_event *gpi_event;
1407 struct gpii_chan *gpii_chan;
1408 u32 chid, type;
1409 u32 ieob_irq;
1410
1411 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1412 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1413
1414 GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp: 0x08%x local_rp:0x08%x\n",
1415 cntxt_rp, local_rp);
1416
1417 do {
1418 while (local_rp != cntxt_rp) {
1419 gpi_event = ev_ring->rp;
1420 chid = gpi_event->xfer_compl_event.chid;
1421 type = gpi_event->xfer_compl_event.type;
1422 GPII_VERB(gpii, GPI_DBG_COMMON,
1423 "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
1424 local_rp, chid, type,
1425 gpi_event->gpi_ere.dword[0],
1426 gpi_event->gpi_ere.dword[1],
1427 gpi_event->gpi_ere.dword[2],
1428 gpi_event->gpi_ere.dword[3]);
1429
1430 switch (type) {
1431 case XFER_COMPLETE_EV_TYPE:
1432 gpii_chan = &gpii->gpii_chan[chid];
1433 gpi_process_xfer_compl_event(gpii_chan,
1434 &gpi_event->xfer_compl_event);
1435 break;
1436 case STALE_EV_TYPE:
1437 GPII_VERB(gpii, GPI_DBG_COMMON,
1438 "stale event, not processing\n");
1439 break;
1440 case IMMEDIATE_DATA_EV_TYPE:
1441 gpii_chan = &gpii->gpii_chan[chid];
1442 gpi_process_imed_data_event(gpii_chan,
1443 &gpi_event->immediate_data_event);
1444 break;
1445 case QUP_NOTIF_EV_TYPE:
1446 gpii_chan = &gpii->gpii_chan[chid];
1447 gpi_process_qup_notif_event(gpii_chan,
1448 &gpi_event->qup_notif_event);
1449 break;
1450 default:
1451 GPII_VERB(gpii, GPI_DBG_COMMON,
1452 "not supported event type:0x%x\n",
1453 type);
1454 }
1455 gpi_ring_recycle_ev_element(ev_ring);
1456 local_rp = (u32)to_physical(ev_ring,
1457 (void *)ev_ring->rp);
1458 }
1459 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1460
1461 /* clear pending IEOB events */
1462 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
1463 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
1464
1465 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1466 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1467
1468 } while (cntxt_rp != local_rp);
1469
1470 GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
1471 local_rp);
1472}
1473
1474/* processing events using tasklet */
1475static void gpi_ev_tasklet(unsigned long data)
1476{
1477 struct gpii *gpii = (struct gpii *)data;
1478
1479 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1480
1481 read_lock_bh(&gpii->pm_lock);
1482 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1483 read_unlock_bh(&gpii->pm_lock);
1484 GPII_ERR(gpii, GPI_DBG_COMMON,
1485 "not processing any events, pm_state:%s\n",
1486 TO_GPI_PM_STR(gpii->pm_state));
1487 return;
1488 }
1489
1490 /* process the events */
1491 gpi_process_events(gpii);
1492
1493 /* enable IEOB, switching back to interrupts */
1494 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1495 read_unlock_bh(&gpii->pm_lock);
1496
1497 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1498}
1499
1500/* marks all pending events for the channel as stale */
1501void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
1502{
1503 struct gpii *gpii = gpii_chan->gpii;
1504 struct gpi_ring *ev_ring = &gpii->ev_ring;
1505 void *ev_rp;
1506 u32 cntxt_rp, local_rp;
1507
1508 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1509 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1510
1511 ev_rp = ev_ring->rp;
1512 local_rp = (u32)to_physical(ev_ring, ev_rp);
1513 while (local_rp != cntxt_rp) {
1514 union gpi_event *gpi_event = ev_rp;
1515 u32 chid = gpi_event->xfer_compl_event.chid;
1516
1517 if (chid == gpii_chan->chid)
1518 gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1519 ev_rp += ev_ring->el_size;
1520 if (ev_rp >= (ev_ring->base + ev_ring->len))
1521 ev_rp = ev_ring->base;
1522 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1523 local_rp = (u32)to_physical(ev_ring, ev_rp);
1524 }
1525}
1526
1527/* reset sw state and issue channel reset or de-alloc */
1528static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
1529{
1530 struct gpii *gpii = gpii_chan->gpii;
1531 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1532 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1533 unsigned long flags;
1534 LIST_HEAD(list);
1535 int ret;
1536
1537 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1538 ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
1539 if (ret) {
1540 GPII_ERR(gpii, gpii_chan->chid,
1541 "Error with cmd:%s ret:%d\n",
1542 TO_GPI_CMD_STR(gpi_cmd), ret);
1543 return ret;
1544 }
1545
1546 /* initialize the local ring ptrs */
1547 ch_ring->rp = ch_ring->base;
1548 ch_ring->wp = ch_ring->base;
1549 sg_ring->rp = sg_ring->base;
1550 sg_ring->wp = sg_ring->base;
1551
1552 /* visible to other cores */
1553 smp_wmb();
1554
1555 /* check event ring for any stale events */
1556 write_lock_irq(&gpii->pm_lock);
1557 gpi_mark_stale_events(gpii_chan);
1558
1559 /* remove all async descriptors */
1560 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1561 vchan_get_all_descriptors(&gpii_chan->vc, &list);
1562 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1563 write_unlock_irq(&gpii->pm_lock);
1564 vchan_dma_desc_free_list(&gpii_chan->vc, &list);
1565
1566 return 0;
1567}
1568
1569static int gpi_start_chan(struct gpii_chan *gpii_chan)
1570{
1571 struct gpii *gpii = gpii_chan->gpii;
1572 int ret;
1573
1574 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1575
1576 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
1577 if (ret) {
1578 GPII_ERR(gpii, gpii_chan->chid,
1579 "Error with cmd:%s ret:%d\n",
1580 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1581 return ret;
1582 }
1583
1584 /* gpii CH is active now */
1585 write_lock_irq(&gpii->pm_lock);
1586 gpii_chan->pm_state = ACTIVE_STATE;
1587 write_unlock_irq(&gpii->pm_lock);
1588
1589 return 0;
1590}
1591
1592/* allocate and configure the transfer channel */
1593static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
1594{
1595 struct gpii *gpii = gpii_chan->gpii;
1596 struct gpi_ring *ring = &gpii_chan->ch_ring;
1597 int i;
1598 int ret;
1599 struct {
1600 void *base;
1601 int offset;
1602 u32 val;
1603 } ch_reg[] = {
1604 {
1605 gpii_chan->ch_cntxt_base_reg,
1606 CNTXT_0_CONFIG,
1607 GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
1608 gpii_chan->dir,
1609 GPI_CHTYPE_PROTO_GPI),
1610 },
1611 {
1612 gpii_chan->ch_cntxt_base_reg,
1613 CNTXT_1_R_LENGTH,
1614 ring->len,
1615 },
1616 {
1617 gpii_chan->ch_cntxt_base_reg,
1618 CNTXT_2_RING_BASE_LSB,
1619 (u32)ring->phys_addr,
1620 },
1621 {
1622 gpii_chan->ch_cntxt_base_reg,
1623 CNTXT_3_RING_BASE_MSB,
1624 (u32)(ring->phys_addr >> 32),
1625 },
1626 { /* program MSB of DB register with ring base */
1627 gpii_chan->ch_cntxt_db_reg,
1628 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1629 (u32)(ring->phys_addr >> 32),
1630 },
1631 {
1632 gpii->regs,
1633 GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
1634 gpii_chan->chid),
1635 GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
1636 gpii_chan->protocol,
1637 gpii_chan->seid),
1638 },
1639 {
1640 gpii->regs,
1641 GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
1642 gpii_chan->chid),
1643 0,
1644 },
1645 {
1646 gpii->regs,
1647 GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
1648 gpii_chan->chid),
1649 0,
1650 },
1651 {
1652 gpii->regs,
1653 GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
1654 gpii_chan->chid),
1655 0,
1656 },
1657 {
1658 gpii->regs,
1659 GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
1660 gpii_chan->chid),
1661 1,
1662 },
1663 { NULL },
1664 };
1665
1666 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1667
1668 if (send_alloc_cmd) {
1669 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
1670 if (ret) {
1671 GPII_ERR(gpii, gpii_chan->chid,
1672 "Error with cmd:%s ret:%d\n",
1673 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1674 return ret;
1675 }
1676 }
1677
1678 /* program channel cntxt registers */
1679 for (i = 0; ch_reg[i].base; i++)
1680 gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
1681 ch_reg[i].val);
1682 /* flush all the writes */
1683 wmb();
1684 return 0;
1685}
1686
1687/* allocate and configure event ring */
1688static int gpi_alloc_ev_chan(struct gpii *gpii)
1689{
1690 struct gpi_ring *ring = &gpii->ev_ring;
1691 int i;
1692 int ret;
1693 struct {
1694 void *base;
1695 int offset;
1696 u32 val;
1697 } ev_reg[] = {
1698 {
1699 gpii->ev_cntxt_base_reg,
1700 CNTXT_0_CONFIG,
1701 GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
1702 GPI_INTTYPE_IRQ,
1703 GPI_CHTYPE_GPI_EV),
1704 },
1705 {
1706 gpii->ev_cntxt_base_reg,
1707 CNTXT_1_R_LENGTH,
1708 ring->len,
1709 },
1710 {
1711 gpii->ev_cntxt_base_reg,
1712 CNTXT_2_RING_BASE_LSB,
1713 (u32)ring->phys_addr,
1714 },
1715 {
1716 gpii->ev_cntxt_base_reg,
1717 CNTXT_3_RING_BASE_MSB,
1718 (u32)(ring->phys_addr >> 32),
1719 },
1720 {
1721 /* program db msg with ring base msb */
1722 gpii->ev_cntxt_db_reg,
1723 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1724 (u32)(ring->phys_addr >> 32),
1725 },
1726 {
1727 gpii->ev_cntxt_base_reg,
1728 CNTXT_8_RING_INT_MOD,
1729 0,
1730 },
1731 {
1732 gpii->ev_cntxt_base_reg,
1733 CNTXT_10_RING_MSI_LSB,
1734 0,
1735 },
1736 {
1737 gpii->ev_cntxt_base_reg,
1738 CNTXT_11_RING_MSI_MSB,
1739 0,
1740 },
1741 {
1742 gpii->ev_cntxt_base_reg,
1743 CNTXT_8_RING_INT_MOD,
1744 0,
1745 },
1746 {
1747 gpii->ev_cntxt_base_reg,
1748 CNTXT_12_RING_RP_UPDATE_LSB,
1749 0,
1750 },
1751 {
1752 gpii->ev_cntxt_base_reg,
1753 CNTXT_13_RING_RP_UPDATE_MSB,
1754 0,
1755 },
1756 { NULL },
1757 };
1758
1759 GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
1760
1761 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1762 if (ret) {
1763 GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
1764 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1765 return ret;
1766 }
1767
1768 /* program event context */
1769 for (i = 0; ev_reg[i].base; i++)
1770 gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
1771 ev_reg[i].val);
1772
1773 /* add events to ring */
1774 ring->wp = (ring->base + ring->len - ring->el_size);
1775
1776 /* flush all the writes */
1777 wmb();
1778
1779 /* gpii is active now */
1780 write_lock_irq(&gpii->pm_lock);
1781 gpii->pm_state = ACTIVE_STATE;
1782 write_unlock_irq(&gpii->pm_lock);
1783 gpi_write_ev_db(gpii, ring, ring->wp);
1784
1785 return 0;
1786}
1787
1788/* calculate # of ERE/TRE available to queue */
1789static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1790{
1791 int elements = 0;
1792
1793 if (ring->wp < ring->rp)
1794 elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1795 else {
1796 elements = (ring->rp - ring->base) / ring->el_size;
1797 elements += ((ring->base + ring->len - ring->wp) /
1798 ring->el_size) - 1;
1799 }
1800
1801 return elements;
1802}
1803
1804static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1805{
1806
1807 if (gpi_ring_num_elements_avail(ring) <= 0)
1808 return -ENOMEM;
1809
1810 *wp = ring->wp;
1811 ring->wp += ring->el_size;
1812 if (ring->wp >= (ring->base + ring->len))
1813 ring->wp = ring->base;
1814
1815 /* visible to other cores */
1816 smp_wmb();
1817
1818 return 0;
1819}
1820
1821static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1822{
1823 /* Update the WP */
1824 ring->wp += ring->el_size;
1825 if (ring->wp >= (ring->base + ring->len))
1826 ring->wp = ring->base;
1827
1828 /* Update the RP */
1829 ring->rp += ring->el_size;
1830 if (ring->rp >= (ring->base + ring->len))
1831 ring->rp = ring->base;
1832
1833 /* visible to other cores */
1834 smp_wmb();
1835}
1836
1837static void gpi_free_ring(struct gpi_ring *ring,
1838 struct gpii *gpii)
1839{
1840 if (ring->dma_handle)
1841 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1842 ring->pre_aligned, ring->dma_handle);
1843 else
1844 vfree(ring->pre_aligned);
1845 memset(ring, 0, sizeof(*ring));
1846}
1847
1848/* allocate memory for transfer and event rings */
1849static int gpi_alloc_ring(struct gpi_ring *ring,
1850 u32 elements,
1851 u32 el_size,
1852 struct gpii *gpii,
1853 bool alloc_coherent)
1854{
1855 u64 len = elements * el_size;
1856 int bit;
1857
1858 if (alloc_coherent) {
1859 /* ring len must be power of 2 */
1860 bit = find_last_bit((unsigned long *)&len, 32);
1861 if (((1 << bit) - 1) & len)
1862 bit++;
1863 len = 1 << bit;
1864 ring->alloc_size = (len + (len - 1));
1865 GPII_INFO(gpii, GPI_DBG_COMMON,
1866 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
1867 elements, el_size, (elements * el_size), len,
1868 ring->alloc_size);
1869 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1870 ring->alloc_size,
1871 &ring->dma_handle,
1872 GFP_KERNEL);
1873 if (!ring->pre_aligned) {
1874 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1875 "could not alloc size:%lu mem for ring\n",
1876 ring->alloc_size);
1877 return -ENOMEM;
1878 }
1879
1880 /* align the physical mem */
1881 ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1882 ring->base = ring->pre_aligned +
1883 (ring->phys_addr - ring->dma_handle);
1884 } else {
1885 ring->pre_aligned = vmalloc(len);
1886 if (!ring->pre_aligned) {
1887 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1888 "could not allocsize:%llu mem for ring\n",
1889 len);
1890 return -ENOMEM;
1891 }
1892 ring->phys_addr = 0;
1893 ring->dma_handle = 0;
1894 ring->base = ring->pre_aligned;
1895 }
1896
1897 ring->rp = ring->base;
1898 ring->wp = ring->base;
1899 ring->len = len;
1900 ring->el_size = el_size;
1901 ring->elements = ring->len / ring->el_size;
1902 memset(ring->base, 0, ring->len);
1903 ring->configured = true;
1904
1905 /* update to other cores */
1906 smp_wmb();
1907
1908 GPII_INFO(gpii, GPI_DBG_COMMON,
1909 "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
1910 ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
1911 ring->elements);
1912
1913 return 0;
1914}
1915
1916/* copy tre into transfer ring */
1917static void gpi_queue_xfer(struct gpii *gpii,
1918 struct gpii_chan *gpii_chan,
1919 struct msm_gpi_tre *gpi_tre,
1920 void **wp,
1921 struct sg_tre **sg_tre)
1922{
1923 struct msm_gpi_tre *ch_tre;
1924 int ret;
1925
1926 /* get next tre location we can copy */
1927 ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
1928 if (unlikely(ret)) {
1929 GPII_CRITIC(gpii, gpii_chan->chid,
1930 "Error adding ring element to xfer ring\n");
1931 return;
1932 }
1933 /* get next sg tre location we can use */
1934 ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
1935 if (unlikely(ret)) {
1936 GPII_CRITIC(gpii, gpii_chan->chid,
1937 "Error adding ring element to sg ring\n");
1938 return;
1939 }
1940
1941 /* copy the tre info */
1942 memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
1943 (*sg_tre)->ptr = gpi_tre;
1944 (*sg_tre)->wp = ch_tre;
1945 *wp = ch_tre;
1946}
1947
1948/* reset and restart transfer channel */
1949int gpi_terminate_all(struct dma_chan *chan)
1950{
1951 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1952 struct gpii *gpii = gpii_chan->gpii;
1953 int schid, echid, i;
1954 int ret = 0;
1955
1956 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1957 mutex_lock(&gpii->ctrl_lock);
1958
1959 /*
1960 * treat both channels as a group if its protocol is not UART
1961 * STOP, RESET, or START needs to be in lockstep
1962 */
1963 schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
1964 echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
1965 MAX_CHANNELS_PER_GPII;
1966
1967 /* stop the channel */
1968 for (i = schid; i < echid; i++) {
1969 gpii_chan = &gpii->gpii_chan[i];
1970
1971 /* disable ch state so no more TRE processing */
1972 write_lock_irq(&gpii->pm_lock);
1973 gpii_chan->pm_state = PREPARE_TERMINATE;
1974 write_unlock_irq(&gpii->pm_lock);
1975
1976 /* send command to Stop the channel */
1977 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
1978 if (ret)
1979 GPII_ERR(gpii, gpii_chan->chid,
1980 "Error Stopping Channel:%d resetting anyway\n",
1981 ret);
1982 }
1983
1984 /* reset the channels (clears any pending tre) */
1985 for (i = schid; i < echid; i++) {
1986 gpii_chan = &gpii->gpii_chan[i];
1987
1988 ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
1989 if (ret) {
1990 GPII_ERR(gpii, gpii_chan->chid,
1991 "Error resetting channel ret:%d\n", ret);
1992 goto terminate_exit;
1993 }
1994
1995 /* reprogram channel CNTXT */
1996 ret = gpi_alloc_chan(gpii_chan, false);
1997 if (ret) {
1998 GPII_ERR(gpii, gpii_chan->chid,
1999 "Error alloc_channel ret:%d\n", ret);
2000 goto terminate_exit;
2001 }
2002 }
2003
2004 /* restart the channels */
2005 for (i = schid; i < echid; i++) {
2006 gpii_chan = &gpii->gpii_chan[i];
2007
2008 ret = gpi_start_chan(gpii_chan);
2009 if (ret) {
2010 GPII_ERR(gpii, gpii_chan->chid,
2011 "Error Starting Channel ret:%d\n", ret);
2012 goto terminate_exit;
2013 }
2014 }
2015
2016terminate_exit:
2017 mutex_unlock(&gpii->ctrl_lock);
2018 return ret;
2019}
2020
2021/* pause dma transfer for all channels */
2022static int gpi_pause(struct dma_chan *chan)
2023{
2024 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2025 struct gpii *gpii = gpii_chan->gpii;
2026 int i, ret;
2027
2028 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
2029 mutex_lock(&gpii->ctrl_lock);
2030
2031 /*
2032 * pause/resume are per gpii not per channel, so
2033 * client needs to call pause only once
2034 */
2035 if (gpii->pm_state == PAUSE_STATE) {
2036 GPII_INFO(gpii, gpii_chan->chid,
2037 "channel is already paused\n");
2038 mutex_unlock(&gpii->ctrl_lock);
2039 return 0;
2040 }
2041
2042 /* send stop command to stop the channels */
2043 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2044 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2045 if (ret) {
2046 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2047 "Error stopping chan, ret:%d\n", ret);
2048 mutex_unlock(&gpii->ctrl_lock);
2049 return ret;
2050 }
2051 }
2052
2053 disable_irq(gpii->irq);
2054
2055 /* Wait for threads to complete out */
2056 tasklet_kill(&gpii->ev_task);
2057
2058 write_lock_irq(&gpii->pm_lock);
2059 gpii->pm_state = PAUSE_STATE;
2060 write_unlock_irq(&gpii->pm_lock);
2061 mutex_unlock(&gpii->ctrl_lock);
2062
2063 return 0;
2064}
2065
2066/* resume dma transfer */
2067static int gpi_resume(struct dma_chan *chan)
2068{
2069 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2070 struct gpii *gpii = gpii_chan->gpii;
2071 int i;
2072 int ret;
2073
2074 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2075
2076 mutex_lock(&gpii->ctrl_lock);
2077 if (gpii->pm_state == ACTIVE_STATE) {
2078 GPII_INFO(gpii, gpii_chan->chid,
2079 "channel is already active\n");
2080 mutex_unlock(&gpii->ctrl_lock);
2081 return 0;
2082 }
2083
2084 enable_irq(gpii->irq);
2085
2086 /* send start command to start the channels */
2087 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2088 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
2089 if (ret) {
2090 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2091 "Erro starting chan, ret:%d\n", ret);
2092 mutex_unlock(&gpii->ctrl_lock);
2093 return ret;
2094 }
2095 }
2096
2097 write_lock_irq(&gpii->pm_lock);
2098 gpii->pm_state = ACTIVE_STATE;
2099 write_unlock_irq(&gpii->pm_lock);
2100 mutex_unlock(&gpii->ctrl_lock);
2101
2102 return 0;
2103}
2104
2105void gpi_desc_free(struct virt_dma_desc *vd)
2106{
2107 struct gpi_desc *gpi_desc = to_gpi_desc(vd);
2108
2109 kfree(gpi_desc);
2110}
2111
2112/* copy tre into transfer ring */
2113struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
2114 struct scatterlist *sgl,
2115 unsigned int sg_len,
2116 enum dma_transfer_direction direction,
2117 unsigned long flags,
2118 void *context)
2119{
2120 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2121 struct gpii *gpii = gpii_chan->gpii;
2122 u32 nr, sg_nr;
2123 u32 nr_req = 0;
2124 int i, j;
2125 struct scatterlist *sg;
2126 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
2127 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
2128 void *tre, *wp = NULL;
2129 struct sg_tre *sg_tre = NULL;
2130 const gfp_t gfp = GFP_ATOMIC;
2131 struct gpi_desc *gpi_desc;
2132
2133 GPII_VERB(gpii, gpii_chan->chid, "enter\n");
2134
2135 if (!is_slave_direction(direction)) {
2136 GPII_ERR(gpii, gpii_chan->chid,
2137 "invalid dma direction: %d\n", direction);
2138 return NULL;
2139 }
2140
2141 /* calculate # of elements required & available */
2142 nr = gpi_ring_num_elements_avail(ch_ring);
2143 sg_nr = gpi_ring_num_elements_avail(sg_ring);
2144 for_each_sg(sgl, sg, sg_len, i) {
2145 GPII_VERB(gpii, gpii_chan->chid,
2146 "%d of %u len:%u\n", i, sg_len, sg->length);
2147 nr_req += (sg->length / ch_ring->el_size);
2148 }
2149 GPII_VERB(gpii, gpii_chan->chid,
2150 "nr_elements_avail:%u sg_avail:%u required:%u\n",
2151 nr, sg_nr, nr_req);
2152
2153 if (nr < nr_req || sg_nr < nr_req) {
2154 GPII_ERR(gpii, gpii_chan->chid,
2155 "not enough space in ring, avail:%u,%u required:%u\n",
2156 nr, sg_nr, nr_req);
2157 return NULL;
2158 }
2159
2160 gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
2161 if (!gpi_desc) {
2162 GPII_ERR(gpii, gpii_chan->chid,
2163 "out of memory for descriptor\n");
2164 return NULL;
2165 }
2166
2167 /* copy each tre into transfer ring */
2168 for_each_sg(sgl, sg, sg_len, i)
2169 for (j = 0, tre = sg_virt(sg); j < sg->length;
2170 j += ch_ring->el_size, tre += ch_ring->el_size)
2171 gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);
2172
2173 /* set up the descriptor */
2174 gpi_desc->db = ch_ring->wp;
2175 gpi_desc->wp = wp;
2176 gpi_desc->sg_tre = sg_tre;
2177 gpi_desc->gpii_chan = gpii_chan;
2178 GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
2179 to_physical(ch_ring, ch_ring->wp),
2180 to_physical(ch_ring, ch_ring->rp));
2181
2182 return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
2183}
2184
2185/* rings transfer ring db to being transfer */
2186static void gpi_issue_pending(struct dma_chan *chan)
2187{
2188 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2189 struct gpii *gpii = gpii_chan->gpii;
2190 unsigned long flags, pm_lock_flags;
2191 struct virt_dma_desc *vd = NULL;
2192 struct gpi_desc *gpi_desc;
2193
2194 GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
2195
2196 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
2197
2198 /* move all submitted discriptors to issued list */
2199 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
2200 if (vchan_issue_pending(&gpii_chan->vc))
2201 vd = list_last_entry(&gpii_chan->vc.desc_issued,
2202 struct virt_dma_desc, node);
2203 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
2204
2205 /* nothing to do list is empty */
2206 if (!vd) {
2207 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2208 GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
2209 return;
2210 }
2211
2212 gpi_desc = to_gpi_desc(vd);
2213 gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
2214 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2215}
2216
2217/* configure or issue async command */
2218static int gpi_config(struct dma_chan *chan,
2219 struct dma_slave_config *config)
2220{
2221 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2222 struct gpii *gpii = gpii_chan->gpii;
2223 struct msm_gpi_ctrl *gpi_ctrl = chan->private;
2224 const int ev_factor = gpii->gpi_dev->ev_factor;
2225 u32 elements;
2226 int i = 0;
2227 int ret = 0;
2228
2229 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2230 if (!gpi_ctrl) {
2231 GPII_ERR(gpii, gpii_chan->chid,
2232 "no config ctrl data provided");
2233 return -EINVAL;
2234 }
2235
2236 mutex_lock(&gpii->ctrl_lock);
2237
2238 switch (gpi_ctrl->cmd) {
2239 case MSM_GPI_INIT:
2240 GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
2241
2242 gpii_chan->client_info.callback = gpi_ctrl->init.callback;
2243 gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
2244 gpii_chan->pm_state = CONFIG_STATE;
2245
2246 /* check if both channels are configured before continue */
2247 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2248 if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
2249 goto exit_gpi_init;
2250
2251 /* configure to highest priority from two channels */
2252 gpii->ev_priority = min(gpii->gpii_chan[0].priority,
2253 gpii->gpii_chan[1].priority);
2254
2255 /* protocol must be same for both channels */
2256 if (gpii->gpii_chan[0].protocol !=
2257 gpii->gpii_chan[1].protocol) {
2258 GPII_ERR(gpii, gpii_chan->chid,
2259 "protocol did not match protocol %u != %u\n",
2260 gpii->gpii_chan[0].protocol,
2261 gpii->gpii_chan[1].protocol);
2262 ret = -EINVAL;
2263 goto exit_gpi_init;
2264 }
2265 gpii->protocol = gpii_chan->protocol;
2266
2267 /* allocate memory for event ring */
2268 elements = max(gpii->gpii_chan[0].req_tres,
2269 gpii->gpii_chan[1].req_tres);
2270 ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
2271 sizeof(union gpi_event), gpii, true);
2272 if (ret) {
2273 GPII_ERR(gpii, gpii_chan->chid,
2274 "error allocating mem for ev ring\n");
2275 goto exit_gpi_init;
2276 }
2277
2278 /* configure interrupts */
2279 write_lock_irq(&gpii->pm_lock);
2280 gpii->pm_state = PREPARE_HARDWARE;
2281 write_unlock_irq(&gpii->pm_lock);
2282 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
2283 if (ret) {
2284 GPII_ERR(gpii, gpii_chan->chid,
2285 "error config. interrupts, ret:%d\n", ret);
2286 goto error_config_int;
2287 }
2288
2289 /* allocate event rings */
2290 ret = gpi_alloc_ev_chan(gpii);
2291 if (ret) {
2292 GPII_ERR(gpii, gpii_chan->chid,
2293 "error alloc_ev_chan:%d\n", ret);
2294 goto error_alloc_ev_ring;
2295 }
2296
2297 /* Allocate all channels */
2298 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2299 ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
2300 if (ret) {
2301 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2302 "Error allocating chan:%d\n", ret);
2303 goto error_alloc_chan;
2304 }
2305 }
2306
2307 /* start channels */
2308 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2309 ret = gpi_start_chan(&gpii->gpii_chan[i]);
2310 if (ret) {
2311 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2312 "Error start chan:%d\n", ret);
2313 goto error_start_chan;
2314 }
2315 }
2316
2317 break;
2318 case MSM_GPI_CMD_UART_SW_STALE:
2319 GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
2320 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
2321 break;
2322 case MSM_GPI_CMD_UART_RFR_READY:
2323 GPII_INFO(gpii, gpii_chan->chid,
2324 "sending UART RFR READY cmd\n");
2325 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
2326 break;
2327 case MSM_GPI_CMD_UART_RFR_NOT_READY:
2328 GPII_INFO(gpii, gpii_chan->chid,
2329 "sending UART RFR READY NOT READY cmd\n");
2330 ret = gpi_send_cmd(gpii, gpii_chan,
2331 GPI_CH_CMD_UART_RFR_NOT_READY);
2332 break;
2333 default:
2334 GPII_ERR(gpii, gpii_chan->chid,
2335 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
2336 ret = -EINVAL;
2337 }
2338
2339 mutex_unlock(&gpii->ctrl_lock);
2340 return ret;
2341
2342error_start_chan:
2343 for (i = i - 1; i >= 0; i++) {
2344 gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2345 gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2346 }
2347 i = 2;
2348error_alloc_chan:
2349 for (i = i - 1; i >= 0; i--)
2350 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2351error_alloc_ev_ring:
2352 gpi_disable_interrupts(gpii);
2353error_config_int:
2354 gpi_free_ring(&gpii->ev_ring, gpii);
2355exit_gpi_init:
2356 mutex_unlock(&gpii->ctrl_lock);
2357 return ret;
2358}
2359
2360/* release all channel resources */
2361static void gpi_free_chan_resources(struct dma_chan *chan)
2362{
2363 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2364 struct gpii *gpii = gpii_chan->gpii;
2365 enum gpi_pm_state cur_state;
2366 int ret, i;
2367
2368 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2369
2370 mutex_lock(&gpii->ctrl_lock);
2371
2372 cur_state = gpii_chan->pm_state;
2373
2374 /* disable ch state so no more TRE processing for this channel */
2375 write_lock_irq(&gpii->pm_lock);
2376 gpii_chan->pm_state = PREPARE_TERMINATE;
2377 write_unlock_irq(&gpii->pm_lock);
2378
2379 /* attemp to do graceful hardware shutdown */
2380 if (cur_state == ACTIVE_STATE) {
2381 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2382 if (ret)
2383 GPII_ERR(gpii, gpii_chan->chid,
2384 "error stopping channel:%d\n", ret);
2385
2386 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2387 if (ret)
2388 GPII_ERR(gpii, gpii_chan->chid,
2389 "error resetting channel:%d\n", ret);
2390
2391 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2392 }
2393
2394 /* free all allocated memory */
2395 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2396 gpi_free_ring(&gpii_chan->sg_ring, gpii);
2397 vchan_free_chan_resources(&gpii_chan->vc);
2398
2399 write_lock_irq(&gpii->pm_lock);
2400 gpii_chan->pm_state = DISABLE_STATE;
2401 write_unlock_irq(&gpii->pm_lock);
2402
2403 /* if other rings are still active exit */
2404 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2405 if (gpii->gpii_chan[i].ch_ring.configured)
2406 goto exit_free;
2407
2408 GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
2409
2410 /* deallocate EV Ring */
2411 cur_state = gpii->pm_state;
2412 write_lock_irq(&gpii->pm_lock);
2413 gpii->pm_state = PREPARE_TERMINATE;
2414 write_unlock_irq(&gpii->pm_lock);
2415
2416 /* wait for threads to complete out */
2417 tasklet_kill(&gpii->ev_task);
2418
2419 /* send command to de allocate event ring */
2420 if (cur_state == ACTIVE_STATE)
2421 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2422
2423 gpi_free_ring(&gpii->ev_ring, gpii);
2424
2425 /* disable interrupts */
2426 if (cur_state == ACTIVE_STATE)
2427 gpi_disable_interrupts(gpii);
2428
2429 /* set final state to disable */
2430 write_lock_irq(&gpii->pm_lock);
2431 gpii->pm_state = DISABLE_STATE;
2432 write_unlock_irq(&gpii->pm_lock);
2433
2434exit_free:
2435 mutex_unlock(&gpii->ctrl_lock);
2436}
2437
2438/* allocate channel resources */
2439static int gpi_alloc_chan_resources(struct dma_chan *chan)
2440{
2441 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2442 struct gpii *gpii = gpii_chan->gpii;
2443 int ret;
2444
2445 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2446
2447 mutex_lock(&gpii->ctrl_lock);
2448
2449 /* allocate memory for transfer ring */
2450 ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
2451 sizeof(struct msm_gpi_tre), gpii, true);
2452 if (ret) {
2453 GPII_ERR(gpii, gpii_chan->chid,
2454 "error allocating xfer ring, ret:%d\n", ret);
2455 goto xfer_alloc_err;
2456 }
2457
2458 ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
2459 sizeof(struct sg_tre), gpii, false);
2460 if (ret) {
2461 GPII_ERR(gpii, gpii_chan->chid,
2462 "error allocating sg ring, ret:%d\n", ret);
2463 goto sg_alloc_error;
2464 }
2465 mutex_unlock(&gpii->ctrl_lock);
2466
2467 return 0;
2468
2469sg_alloc_error:
2470 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2471xfer_alloc_err:
2472 mutex_unlock(&gpii->ctrl_lock);
2473
2474 return ret;
2475}
2476
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002477static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
2478{
2479 int gpii;
2480 struct gpii_chan *tx_chan, *rx_chan;
2481
2482 /* check if same seid is already configured for another chid */
2483 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2484 if (!((1 << gpii) & gpi_dev->gpii_mask))
2485 continue;
2486
2487 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2488 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2489
2490 if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
2491 return gpii;
2492 if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
2493 return gpii;
2494 }
2495
2496 /* no channels configured with same seid, return next avail gpii */
2497 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2498 if (!((1 << gpii) & gpi_dev->gpii_mask))
2499 continue;
2500
2501 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2502 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2503
2504 /* check if gpii is configured */
2505 if (tx_chan->vc.chan.client_count ||
2506 rx_chan->vc.chan.client_count)
2507 continue;
2508
2509 /* found a free gpii */
2510 return gpii;
2511 }
2512
2513 /* no gpii instance available to use */
2514 return -EIO;
2515}
2516
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002517/* gpi_of_dma_xlate: open client requested channel */
2518static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2519 struct of_dma *of_dma)
2520{
2521 struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002522 u32 seid, chid;
2523 int gpii;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002524 struct gpii_chan *gpii_chan;
2525
2526 if (args->args_count < REQ_OF_DMA_ARGS) {
2527 GPI_ERR(gpi_dev,
2528 "gpii require minimum 6 args, client passed:%d args\n",
2529 args->args_count);
2530 return NULL;
2531 }
2532
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002533 chid = args->args[0];
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002534 if (chid >= MAX_CHANNELS_PER_GPII) {
2535 GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
2536 return NULL;
2537 }
2538
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002539 seid = args->args[1];
2540
2541 /* find next available gpii to use */
2542 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2543 if (gpii < 0) {
2544 GPI_ERR(gpi_dev, "no available gpii instances\n");
2545 return NULL;
2546 }
2547
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002548 gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
Sujeev Diasdfe09e12017-08-31 18:31:04 -07002549 if (gpii_chan->vc.chan.client_count) {
2550 GPI_ERR(gpi_dev, "gpii:%d chid:%d seid:%d already configured\n",
2551 gpii, chid, gpii_chan->seid);
2552 return NULL;
2553 }
2554
2555 /* get ring size, protocol, se_id, and priority */
2556 gpii_chan->seid = seid;
2557 gpii_chan->protocol = args->args[2];
2558 gpii_chan->req_tres = args->args[3];
2559 gpii_chan->priority = args->args[4];
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002560
2561 GPI_LOG(gpi_dev,
2562 "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
2563 gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
2564 gpii_chan->protocol);
2565
2566 return dma_get_slave_channel(&gpii_chan->vc.chan);
2567}
2568
2569/* gpi_setup_debug - setup debug capabilities */
2570static void gpi_setup_debug(struct gpi_dev *gpi_dev)
2571{
2572 char node_name[GPI_LABEL_SIZE];
2573 const umode_t mode = 0600;
2574 int i;
2575
2576 snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
2577 (u64)gpi_dev->res->start);
2578
2579 gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2580 node_name, 0);
2581 gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2582 if (!IS_ERR_OR_NULL(pdentry)) {
2583 snprintf(node_name, sizeof(node_name), "%llx",
2584 (u64)gpi_dev->res->start);
2585 gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
2586 if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
2587 debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
2588 &gpi_dev->ipc_log_lvl);
2589 debugfs_create_u32("klog_lvl", mode,
2590 gpi_dev->dentry, &gpi_dev->klog_lvl);
2591 }
2592 }
2593
2594 for (i = 0; i < gpi_dev->max_gpii; i++) {
2595 struct gpii *gpii;
2596
2597 if (!((1 << i) & gpi_dev->gpii_mask))
2598 continue;
2599
2600 gpii = &gpi_dev->gpiis[i];
2601 snprintf(gpii->label, sizeof(gpii->label),
2602 "%s%llx_gpii%d",
2603 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
2604 gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2605 gpii->label, 0);
2606 gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2607 gpii->klog_lvl = DEFAULT_KLOG_LVL;
2608
2609 if (IS_ERR_OR_NULL(gpi_dev->dentry))
2610 continue;
2611
2612 snprintf(node_name, sizeof(node_name), "gpii%d", i);
2613 gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
2614 if (IS_ERR_OR_NULL(gpii->dentry))
2615 continue;
2616
2617 debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
2618 &gpii->ipc_log_lvl);
2619 debugfs_create_u32("klog_lvl", mode, gpii->dentry,
2620 &gpii->klog_lvl);
2621 }
2622}
2623
Sujeev Dias69484212017-08-31 10:06:53 -07002624static struct dma_iommu_mapping *gpi_create_mapping(struct gpi_dev *gpi_dev)
2625{
2626 dma_addr_t base;
2627 size_t size;
2628
2629 /*
2630 * If S1_BYPASS enabled then iommu space is not used, however framework
2631 * still require clients to create a mapping space before attaching. So
2632 * set to smallest size required by iommu framework.
2633 */
2634 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2635 base = 0;
2636 size = PAGE_SIZE;
2637 } else {
2638 base = gpi_dev->iova_base;
2639 size = gpi_dev->iova_size;
2640 }
2641
2642 GPI_LOG(gpi_dev, "Creating iommu mapping of base:0x%llx size:%lu\n",
2643 base, size);
2644
2645 return arm_iommu_create_mapping(&platform_bus_type, base, size);
2646}
2647
2648static int gpi_dma_mask(struct gpi_dev *gpi_dev)
2649{
2650 int mask = 64;
2651
2652 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2653 unsigned long addr;
2654
2655 addr = gpi_dev->iova_base + gpi_dev->iova_size + 1;
2656 mask = find_last_bit(&addr, 64);
2657 }
2658
2659 GPI_LOG(gpi_dev, "Setting dma mask to %d\n", mask);
2660
2661 return dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(mask));
2662}
2663
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002664static int gpi_smmu_init(struct gpi_dev *gpi_dev)
2665{
Sujeev Dias69484212017-08-31 10:06:53 -07002666 struct dma_iommu_mapping *mapping = NULL;
2667 int ret;
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002668
Sujeev Dias69484212017-08-31 10:06:53 -07002669 if (gpi_dev->smmu_cfg) {
2670
2671 /* create mapping table */
2672 mapping = gpi_create_mapping(gpi_dev);
2673 if (IS_ERR(mapping)) {
2674 GPI_ERR(gpi_dev,
2675 "Failed to create iommu mapping, ret:%ld\n",
2676 PTR_ERR(mapping));
2677 return PTR_ERR(mapping);
2678 }
2679
2680 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2681 int s1_bypass = 1;
2682
2683 ret = iommu_domain_set_attr(mapping->domain,
2684 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
2685 if (ret) {
2686 GPI_ERR(gpi_dev,
2687 "Failed to set attr S1_BYPASS, ret:%d\n",
2688 ret);
2689 goto release_mapping;
2690 }
2691 }
2692
2693 if (gpi_dev->smmu_cfg & GPI_SMMU_FAST) {
2694 int fast = 1;
2695
2696 ret = iommu_domain_set_attr(mapping->domain,
2697 DOMAIN_ATTR_FAST, &fast);
2698 if (ret) {
2699 GPI_ERR(gpi_dev,
2700 "Failed to set attr FAST, ret:%d\n",
2701 ret);
2702 goto release_mapping;
2703 }
2704 }
2705
2706 if (gpi_dev->smmu_cfg & GPI_SMMU_ATOMIC) {
2707 int atomic = 1;
2708
2709 ret = iommu_domain_set_attr(mapping->domain,
2710 DOMAIN_ATTR_ATOMIC, &atomic);
2711 if (ret) {
2712 GPI_ERR(gpi_dev,
2713 "Failed to set attr ATOMIC, ret:%d\n",
2714 ret);
2715 goto release_mapping;
2716 }
2717 }
2718
2719 ret = arm_iommu_attach_device(gpi_dev->dev, mapping);
2720 if (ret) {
2721 GPI_ERR(gpi_dev,
2722 "Failed with iommu_attach, ret:%d\n", ret);
2723 goto release_mapping;
2724 }
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002725 }
2726
Sujeev Dias69484212017-08-31 10:06:53 -07002727 ret = gpi_dma_mask(gpi_dev);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002728 if (ret) {
Sujeev Dias69484212017-08-31 10:06:53 -07002729 GPI_ERR(gpi_dev, "Error setting dma_mask, ret:%d\n", ret);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002730 goto error_set_mask;
2731 }
2732
2733 return ret;
2734
2735error_set_mask:
Sujeev Dias69484212017-08-31 10:06:53 -07002736 if (gpi_dev->smmu_cfg)
2737 arm_iommu_detach_device(gpi_dev->dev);
2738release_mapping:
2739 if (mapping)
2740 arm_iommu_release_mapping(mapping);
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002741 return ret;
2742}
2743
2744static int gpi_probe(struct platform_device *pdev)
2745{
2746 struct gpi_dev *gpi_dev;
2747 int ret, i;
2748
2749 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2750 if (!gpi_dev)
2751 return -ENOMEM;
2752
2753 gpi_dev->dev = &pdev->dev;
2754 gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
2755 gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2756 "gpi-top");
2757 if (!gpi_dev->res) {
2758 GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
2759 return -EINVAL;
2760 }
2761 gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
2762 resource_size(gpi_dev->res));
2763 if (!gpi_dev->regs) {
2764 GPI_ERR(gpi_dev, "IO remap failed\n");
2765 return -EFAULT;
2766 }
2767
2768 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
2769 &gpi_dev->max_gpii);
2770 if (ret) {
2771 GPI_ERR(gpi_dev, "missing 'max-no-gpii' DT node\n");
2772 return ret;
2773 }
2774
2775 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
2776 &gpi_dev->gpii_mask);
2777 if (ret) {
2778 GPI_ERR(gpi_dev, "missing 'gpii-mask' DT node\n");
2779 return ret;
2780 }
2781
2782 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
2783 &gpi_dev->ev_factor);
2784 if (ret) {
2785 GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
2786 return ret;
2787 }
2788
Sujeev Dias69484212017-08-31 10:06:53 -07002789 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,smmu-cfg",
2790 &gpi_dev->smmu_cfg);
2791 if (ret) {
2792 GPI_ERR(gpi_dev, "missing 'qcom,smmu-cfg' DT node\n");
2793 return ret;
2794 }
2795 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2796 u64 iova_range[2];
2797
2798 ret = of_property_count_elems_of_size(gpi_dev->dev->of_node,
2799 "qcom,iova-range",
2800 sizeof(iova_range));
2801 if (ret != 1) {
2802 GPI_ERR(gpi_dev,
2803 "missing or incorrect 'qcom,iova-range' DT node ret:%d\n",
2804 ret);
2805 }
2806
2807 ret = of_property_read_u64_array(gpi_dev->dev->of_node,
2808 "qcom,iova-range", iova_range,
2809 sizeof(iova_range) / sizeof(u64));
2810 if (ret) {
2811 GPI_ERR(gpi_dev,
2812 "could not read DT prop 'qcom,iova-range\n");
2813 return ret;
2814 }
2815 gpi_dev->iova_base = iova_range[0];
2816 gpi_dev->iova_size = iova_range[1];
2817 }
2818
Sujeev Diasdd66ce02016-09-07 11:35:11 -07002819 ret = gpi_smmu_init(gpi_dev);
2820 if (ret) {
2821 GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
2822 return ret;
2823 }
2824
2825 gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
2826 sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
2827 GFP_KERNEL);
2828 if (!gpi_dev->gpiis)
2829 return -ENOMEM;
2830
2831
2832 /* setup all the supported gpii */
2833 INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2834 for (i = 0; i < gpi_dev->max_gpii; i++) {
2835 struct gpii *gpii = &gpi_dev->gpiis[i];
2836 int chan;
2837
2838 if (!((1 << i) & gpi_dev->gpii_mask))
2839 continue;
2840
2841 /* set up ev cntxt register map */
2842 gpii->ev_cntxt_base_reg = gpi_dev->regs +
2843 GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2844 gpii->ev_cntxt_db_reg = gpi_dev->regs +
2845 GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2846 gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
2847 CNTXT_2_RING_BASE_LSB;
2848 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
2849 CNTXT_4_RING_RP_LSB;
2850 gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
2851 CNTXT_6_RING_WP_LSB;
2852 gpii->ev_cmd_reg = gpi_dev->regs +
2853 GPI_GPII_n_EV_CH_CMD_OFFS(i);
2854 gpii->ieob_src_reg = gpi_dev->regs +
2855 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
2856 gpii->ieob_clr_reg = gpi_dev->regs +
2857 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2858
2859 /* set up irq */
2860 ret = platform_get_irq(pdev, i);
2861 if (ret < 0) {
2862 GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d",
2863 i, ret);
2864 return ret;
2865 }
2866 gpii->irq = ret;
2867
2868 /* set up channel specific register info */
2869 for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2870 struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
2871
2872 /* set up ch cntxt register map */
2873 gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
2874 GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2875 gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
2876 GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2877 gpii_chan->ch_ring_base_lsb_reg =
2878 gpii_chan->ch_cntxt_base_reg +
2879 CNTXT_2_RING_BASE_LSB;
2880 gpii_chan->ch_ring_rp_lsb_reg =
2881 gpii_chan->ch_cntxt_base_reg +
2882 CNTXT_4_RING_RP_LSB;
2883 gpii_chan->ch_ring_wp_lsb_reg =
2884 gpii_chan->ch_cntxt_base_reg +
2885 CNTXT_6_RING_WP_LSB;
2886 gpii_chan->ch_cmd_reg = gpi_dev->regs +
2887 GPI_GPII_n_CH_CMD_OFFS(i);
2888
2889 /* vchan setup */
2890 vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
2891 gpii_chan->vc.desc_free = gpi_desc_free;
2892 gpii_chan->chid = chan;
2893 gpii_chan->gpii = gpii;
2894 gpii_chan->dir = GPII_CHAN_DIR[chan];
2895 }
2896 mutex_init(&gpii->ctrl_lock);
2897 rwlock_init(&gpii->pm_lock);
2898 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2899 (unsigned long)gpii);
2900 init_completion(&gpii->cmd_completion);
2901 gpii->gpii_id = i;
2902 gpii->regs = gpi_dev->regs;
2903 gpii->gpi_dev = gpi_dev;
2904 atomic_set(&gpii->dbg_index, 0);
2905 }
2906
2907 platform_set_drvdata(pdev, gpi_dev);
2908
2909 /* clear and Set capabilities */
2910 dma_cap_zero(gpi_dev->dma_device.cap_mask);
2911 dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2912
2913 /* configure dmaengine apis */
2914 gpi_dev->dma_device.directions =
2915 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2916 gpi_dev->dma_device.residue_granularity =
2917 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2918 gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2919 gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2920 gpi_dev->dma_device.device_alloc_chan_resources =
2921 gpi_alloc_chan_resources;
2922 gpi_dev->dma_device.device_free_chan_resources =
2923 gpi_free_chan_resources;
2924 gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2925 gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2926 gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2927 gpi_dev->dma_device.device_config = gpi_config;
2928 gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2929 gpi_dev->dma_device.dev = gpi_dev->dev;
2930 gpi_dev->dma_device.device_pause = gpi_pause;
2931 gpi_dev->dma_device.device_resume = gpi_resume;
2932
2933 /* register with dmaengine framework */
2934 ret = dma_async_device_register(&gpi_dev->dma_device);
2935 if (ret) {
2936 GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
2937 return ret;
2938 }
2939
2940 ret = of_dma_controller_register(gpi_dev->dev->of_node,
2941 gpi_of_dma_xlate, gpi_dev);
2942 if (ret) {
2943 GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
2944 return ret;
2945 }
2946
2947 /* setup debug capabilities */
2948 gpi_setup_debug(gpi_dev);
2949 GPI_LOG(gpi_dev, "probe success\n");
2950
2951 return ret;
2952}
2953
2954static const struct of_device_id gpi_of_match[] = {
2955 { .compatible = "qcom,gpi-dma" },
2956 {}
2957};
2958MODULE_DEVICE_TABLE(of, gpi_of_match);
2959
2960static struct platform_driver gpi_driver = {
2961 .probe = gpi_probe,
2962 .driver = {
2963 .name = GPI_DMA_DRV_NAME,
2964 .of_match_table = gpi_of_match,
2965 },
2966};
2967
2968static int __init gpi_init(void)
2969{
2970 pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
2971 return platform_driver_register(&gpi_driver);
2972}
2973module_init(gpi_init)
2974
2975MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2976MODULE_LICENSE("GPL v2");