1/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <asm/dma-iommu.h>
14#include <linux/atomic.h>
15#include <linux/completion.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/io.h>
21#include <linux/iommu.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/ipc_logging.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_address.h>
29#include <linux/of_dma.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/scatterlist.h>
33#include <linux/sched_clock.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <asm/cacheflush.h>
37#include <linux/msm_gpi.h>
38#include "../dmaengine.h"
39#include "../virt-dma.h"
40#include "msm_gpi_mmio.h"
41
42/* global logging macros */
43#define GPI_LOG(gpi_dev, fmt, ...) do { \
44 if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
45 dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
46 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
47 ipc_log_string(gpi_dev->ilctxt, \
48 "%s: " fmt, __func__, ##__VA_ARGS__); \
49 } while (0)
50#define GPI_ERR(gpi_dev, fmt, ...) do { \
51 if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
52 dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
53 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
54 ipc_log_string(gpi_dev->ilctxt, \
55 "%s: " fmt, __func__, ##__VA_ARGS__); \
56 } while (0)
57
58/* gpii specific logging macros */
59#define GPII_REG(gpii, ch, fmt, ...) do { \
60 if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
61 pr_info("%s:%u:%s: " fmt, gpii->label, \
62 ch, __func__, ##__VA_ARGS__); \
63 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
64 ipc_log_string(gpii->ilctxt, \
65 "ch:%u %s: " fmt, ch, \
66 __func__, ##__VA_ARGS__); \
67 } while (0)
68#define GPII_VERB(gpii, ch, fmt, ...) do { \
69 if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
70 pr_info("%s:%u:%s: " fmt, gpii->label, \
71 ch, __func__, ##__VA_ARGS__); \
72 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
73 ipc_log_string(gpii->ilctxt, \
74 "ch:%u %s: " fmt, ch, \
75 __func__, ##__VA_ARGS__); \
76 } while (0)
77#define GPII_INFO(gpii, ch, fmt, ...) do { \
78 if (gpii->klog_lvl >= LOG_LVL_INFO) \
79 pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
80 __func__, ##__VA_ARGS__); \
81 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
82 ipc_log_string(gpii->ilctxt, \
83 "ch:%u %s: " fmt, ch, \
84 __func__, ##__VA_ARGS__); \
85 } while (0)
86#define GPII_ERR(gpii, ch, fmt, ...) do { \
87 if (gpii->klog_lvl >= LOG_LVL_ERROR) \
88 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
89 __func__, ##__VA_ARGS__); \
90 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
91 ipc_log_string(gpii->ilctxt, \
92 "ch:%u %s: " fmt, ch, \
93 __func__, ##__VA_ARGS__); \
94 } while (0)
95#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
96 if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
97 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
98 __func__, ##__VA_ARGS__); \
99 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
100 ipc_log_string(gpii->ilctxt, \
101 "ch:%u %s: " fmt, ch, \
102 __func__, ##__VA_ARGS__); \
103 } while (0)
104
105enum DEBUG_LOG_LVL {
106 LOG_LVL_MASK_ALL,
107 LOG_LVL_CRITICAL,
108 LOG_LVL_ERROR,
109 LOG_LVL_INFO,
110 LOG_LVL_VERBOSE,
111 LOG_LVL_REG_ACCESS,
112};
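/*
 * Log levels are ordered from least to most verbose: LOG_LVL_MASK_ALL (0)
 * suppresses output, and the leveled logging macros above emit a message
 * only when the configured klog_lvl/ipc_log_lvl is at or above the level
 * of that message.
 */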
113
114enum EV_PRIORITY {
115 EV_PRIORITY_ISR,
116 EV_PRIORITY_TASKLET,
117};
118
119#define GPI_DMA_DRV_NAME "gpi_dma"
120#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
121#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
122#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
123#define IPC_LOG_PAGES (40)
124#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
125#else
126#define IPC_LOG_PAGES (2)
127#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
128#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
129#endif
130
131#define GPI_LABEL_SIZE (256)
132#define GPI_DBG_COMMON (99)
133#define MAX_CHANNELS_PER_GPII (2)
134#define GPI_TX_CHAN (0)
135#define GPI_RX_CHAN (1)
136#define CMD_TIMEOUT_MS (50)
137#define STATE_IGNORE (U32_MAX)
138#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
139
140struct __packed gpi_error_log_entry {
141 u32 routine : 4;
142 u32 type : 4;
143 u32 reserved0 : 4;
144 u32 code : 4;
145 u32 reserved1 : 3;
146 u32 chid : 5;
147 u32 reserved2 : 1;
148 u32 chtype : 1;
149 u32 ee : 1;
150};
151
152struct __packed xfer_compl_event {
153 u64 ptr;
154 u32 length : 24;
155 u8 code;
156 u16 status;
157 u8 type;
158 u8 chid;
159};
160
161struct __packed immediate_data_event {
162 u8 data_bytes[8];
163 u8 length : 4;
164 u8 resvd : 4;
165 u16 tre_index;
166 u8 code;
167 u16 status;
168 u8 type;
169 u8 chid;
170};
171
172struct __packed qup_notif_event {
173 u32 status;
174 u32 time;
175 u32 count :24;
176 u8 resvd;
177 u16 resvd1;
178 u8 type;
179 u8 chid;
180};
181
182struct __packed gpi_ere {
183 u32 dword[4];
184};
185
186enum GPI_EV_TYPE {
187 XFER_COMPLETE_EV_TYPE = 0x22,
188 IMMEDIATE_DATA_EV_TYPE = 0x30,
189 QUP_NOTIF_EV_TYPE = 0x31,
190 STALE_EV_TYPE = 0xFF,
191};
192
193union __packed gpi_event {
194 struct __packed xfer_compl_event xfer_compl_event;
195 struct __packed immediate_data_event immediate_data_event;
196 struct __packed qup_notif_event qup_notif_event;
197 struct __packed gpi_ere gpi_ere;
198};
199
200enum gpii_irq_settings {
201 DEFAULT_IRQ_SETTINGS,
202 MASK_IEOB_SETTINGS,
203};
204
205enum gpi_ev_state {
206 DEFAULT_EV_CH_STATE = 0,
207 EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
208 EV_STATE_ALLOCATED,
209 MAX_EV_STATES
210};
211
212static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
213 [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
214 [EV_STATE_ALLOCATED] = "ALLOCATED",
215};
216
217#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
218 "INVALID" : gpi_ev_state_str[state])
219
220enum gpi_ch_state {
221 DEFAULT_CH_STATE = 0x0,
222 CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
223 CH_STATE_ALLOCATED = 0x1,
224 CH_STATE_STARTED = 0x2,
225 CH_STATE_STOPPED = 0x3,
226 CH_STATE_STOP_IN_PROC = 0x4,
227 CH_STATE_ERROR = 0xf,
228 MAX_CH_STATES
229};
230
231static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
232 [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
233 [CH_STATE_ALLOCATED] = "ALLOCATED",
234 [CH_STATE_STARTED] = "STARTED",
235 [CH_STATE_STOPPED] = "STOPPED",
236 [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
237 [CH_STATE_ERROR] = "ERROR",
238};
239
240#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
241 "INVALID" : gpi_ch_state_str[state])
242
243enum gpi_cmd {
244 GPI_CH_CMD_BEGIN,
245 GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
246 GPI_CH_CMD_START,
247 GPI_CH_CMD_STOP,
248 GPI_CH_CMD_RESET,
249 GPI_CH_CMD_DE_ALLOC,
250 GPI_CH_CMD_UART_SW_STALE,
251 GPI_CH_CMD_UART_RFR_READY,
252 GPI_CH_CMD_UART_RFR_NOT_READY,
253 GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
254 GPI_EV_CMD_BEGIN,
255 GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
256 GPI_EV_CMD_RESET,
257 GPI_EV_CMD_DEALLOC,
258 GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
259 GPI_MAX_CMD,
260};
261
262#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
263
264static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
265 [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
266 [GPI_CH_CMD_START] = "CH START",
267 [GPI_CH_CMD_STOP] = "CH STOP",
268 [GPI_CH_CMD_RESET] = "CH_RESET",
269 [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
270 [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
271 [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
272 [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
273 [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
274 [GPI_EV_CMD_RESET] = "EV RESET",
275 [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
276};
277
278#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
279 gpi_cmd_str[cmd])
280
281static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
282 [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
283 [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
284 [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
285 [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
286 [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
287 [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
288};
289
290#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
291 "INVALID" : gpi_cb_event_str[event])
292
293enum se_protocol {
294 SE_PROTOCOL_SPI = 1,
295 SE_PROTOCOL_UART = 2,
296 SE_PROTOCOL_I2C = 3,
297 SE_MAX_PROTOCOL
298};
299
300/*
301 * @DISABLE_STATE: no register access allowed
302 * @CONFIG_STATE: client has configured the channel
303 * @PREPARE_HARDWARE: register access is allowed,
304 * but events are not processed yet
305 * @ACTIVE_STATE: channels are fully operational
306 * @PREPARE_TERMINATE: graceful termination of channels;
307 * register access is allowed
308 * @PAUSE_STATE: channels are active, but not processing any events
309 */
310enum gpi_pm_state {
311 DISABLE_STATE,
312 CONFIG_STATE,
313 PREPARE_HARDWARE,
314 ACTIVE_STATE,
315 PREPARE_TERMINATE,
316 PAUSE_STATE,
317 MAX_PM_STATE
318};
319
320#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
321
322static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
323 [DISABLE_STATE] = "DISABLE",
324 [CONFIG_STATE] = "CONFIG",
325 [PREPARE_HARDWARE] = "PREPARE HARDWARE",
326 [ACTIVE_STATE] = "ACTIVE",
327 [PREPARE_TERMINATE] = "PREPARE TERMINATE",
328 [PAUSE_STATE] = "PAUSE",
329};
330
331#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
332 "INVALID" : gpi_pm_state_str[state])
333
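/*
 * Per-command descriptor table: the GPI opcode programmed into the CH/EV
 * command register, the channel/event-ring state expected once the command
 * completes (STATE_IGNORE if the command does not change state), and the
 * completion timeout used by gpi_send_cmd().
 */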
334static const struct {
335 enum gpi_cmd gpi_cmd;
336 u32 opcode;
337 u32 state;
338 u32 timeout_ms;
339} gpi_cmd_info[GPI_MAX_CMD] = {
340 {
341 GPI_CH_CMD_ALLOCATE,
342 GPI_GPII_n_CH_CMD_ALLOCATE,
343 CH_STATE_ALLOCATED,
344 CMD_TIMEOUT_MS,
345 },
346 {
347 GPI_CH_CMD_START,
348 GPI_GPII_n_CH_CMD_START,
349 CH_STATE_STARTED,
350 CMD_TIMEOUT_MS,
351 },
352 {
353 GPI_CH_CMD_STOP,
354 GPI_GPII_n_CH_CMD_STOP,
355 CH_STATE_STOPPED,
356 CMD_TIMEOUT_MS,
357 },
358 {
359 GPI_CH_CMD_RESET,
360 GPI_GPII_n_CH_CMD_RESET,
361 CH_STATE_ALLOCATED,
362 CMD_TIMEOUT_MS,
363 },
364 {
365 GPI_CH_CMD_DE_ALLOC,
366 GPI_GPII_n_CH_CMD_DE_ALLOC,
367 CH_STATE_NOT_ALLOCATED,
368 CMD_TIMEOUT_MS,
369 },
370 {
371 GPI_CH_CMD_UART_SW_STALE,
372 GPI_GPII_n_CH_CMD_UART_SW_STALE,
373 STATE_IGNORE,
374 CMD_TIMEOUT_MS,
375 },
376 {
377 GPI_CH_CMD_UART_RFR_READY,
378 GPI_GPII_n_CH_CMD_UART_RFR_READY,
379 STATE_IGNORE,
380 CMD_TIMEOUT_MS,
381 },
382 {
383 GPI_CH_CMD_UART_RFR_NOT_READY,
384 GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
385 STATE_IGNORE,
386 CMD_TIMEOUT_MS,
387 },
388 {
389 GPI_EV_CMD_ALLOCATE,
390 GPI_GPII_n_EV_CH_CMD_ALLOCATE,
391 EV_STATE_ALLOCATED,
392 CMD_TIMEOUT_MS,
393 },
394 {
395 GPI_EV_CMD_RESET,
396 GPI_GPII_n_EV_CH_CMD_RESET,
397 EV_STATE_ALLOCATED,
398 CMD_TIMEOUT_MS,
399 },
400 {
401 GPI_EV_CMD_DEALLOC,
402 GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
403 EV_STATE_NOT_ALLOCATED,
404 CMD_TIMEOUT_MS,
405 },
406};
407
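/*
 * Ring bookkeeping: base/phys_addr describe the (aligned) ring memory,
 * rp/wp are the local read and write pointers within it, and el_size is
 * the size of one ring element. to_physical()/to_virtual() below convert
 * between ring virtual addresses and the bus addresses programmed into
 * the hardware.
 */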
408struct gpi_ring {
409 void *pre_aligned;
410 size_t alloc_size;
411 phys_addr_t phys_addr;
412 dma_addr_t dma_handle;
413 void *base;
414 void *wp;
415 void *rp;
416 u32 len;
417 u32 el_size;
418 u32 elements;
419 bool configured;
420};
421
422struct sg_tre {
423 void *ptr;
424 void *wp; /* store chan wp for debugging */
425};
426
427struct gpi_dbg_log {
428 void *addr;
429 u64 time;
430 u32 val;
431 bool read;
432};
433
434struct gpi_dev {
435 struct dma_device dma_device;
436 struct device *dev;
437 struct resource *res;
438 void __iomem *regs;
439 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
440 u32 gpii_mask; /* gpii instances available for apps */
441 u32 ev_factor; /* ev ring length factor */
442 u32 smmu_cfg;
443 dma_addr_t iova_base;
444 size_t iova_size;
445 struct gpii *gpiis;
446 void *ilctxt;
447 u32 ipc_log_lvl;
448 u32 klog_lvl;
449 struct dentry *dentry;
450};
451
452struct gpii_chan {
453 struct virt_dma_chan vc;
454 u32 chid;
455 u32 seid;
456 enum se_protocol protocol;
457 enum EV_PRIORITY priority; /* comes from clients DT node */
458 struct gpii *gpii;
459 enum gpi_ch_state ch_state;
460 enum gpi_pm_state pm_state;
461 void __iomem *ch_cntxt_base_reg;
462 void __iomem *ch_cntxt_db_reg;
463 void __iomem *ch_ring_base_lsb_reg,
464 *ch_ring_rp_lsb_reg,
465 *ch_ring_wp_lsb_reg;
466 void __iomem *ch_cmd_reg;
467 u32 req_tres; /* # of tre's client requested */
468 u32 dir;
469 struct gpi_ring ch_ring;
470 struct gpi_ring sg_ring; /* points to client scatterlist */
471 struct gpi_client_info client_info;
472};
473
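/*
 * A gpii instance pairs one TX and one RX channel (MAX_CHANNELS_PER_GPII)
 * with a single shared event ring, command completion and IRQ line;
 * control-path operations are serialized with ctrl_lock and the power
 * state is protected by pm_lock.
 */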
474struct gpii {
475 u32 gpii_id;
476 struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
477 struct gpi_dev *gpi_dev;
478 enum EV_PRIORITY ev_priority;
479 enum se_protocol protocol;
480 int irq;
481 void __iomem *regs; /* points to gpi top */
482 void __iomem *ev_cntxt_base_reg;
483 void __iomem *ev_cntxt_db_reg;
484 void __iomem *ev_ring_base_lsb_reg,
485 *ev_ring_rp_lsb_reg,
486 *ev_ring_wp_lsb_reg;
487 void __iomem *ev_cmd_reg;
488 void __iomem *ieob_src_reg;
489 void __iomem *ieob_clr_reg;
490 struct mutex ctrl_lock;
491 enum gpi_ev_state ev_state;
492 bool configured_irq;
493 enum gpi_pm_state pm_state;
494 rwlock_t pm_lock;
495 struct gpi_ring ev_ring;
496 struct tasklet_struct ev_task; /* event processing tasklet */
497 struct completion cmd_completion;
498 enum gpi_cmd gpi_cmd;
499 u32 cntxt_type_irq_msk;
500 void *ilctxt;
501 u32 ipc_log_lvl;
502 u32 klog_lvl;
503 struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
504 atomic_t dbg_index;
505 char label[GPI_LABEL_SIZE];
506 struct dentry *dentry;
507};
508
509struct gpi_desc {
510 struct virt_dma_desc vd;
511 void *wp; /* points to TRE last queued during issue_pending */
512 struct sg_tre *sg_tre; /* points to last scatterlist */
513 void *db; /* DB register to program */
514 struct gpii_chan *gpii_chan;
515};
516
517#define GPI_SMMU_ATTACH BIT(0)
518#define GPI_SMMU_S1_BYPASS BIT(1)
519#define GPI_SMMU_FAST BIT(2)
520#define GPI_SMMU_ATOMIC BIT(3)
521
522const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
523 GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
524};
525
526struct dentry *pdentry;
527static irqreturn_t gpi_handle_irq(int irq, void *data);
528static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
529static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
530static void gpi_process_events(struct gpii *gpii);
531
532static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
533{
534 return container_of(dma_chan, struct gpii_chan, vc.chan);
535}
536
537static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
538{
539 return container_of(vd, struct gpi_desc, vd);
540}
541
542static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
543 void *addr)
544{
545 return ring->phys_addr + (addr - ring->base);
546}
547
548static inline void *to_virtual(const struct gpi_ring *const ring,
549 phys_addr_t addr)
550{
551 return ring->base + (addr - ring->phys_addr);
552}
553
554#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
555static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
556{
557 u64 time = sched_clock();
558 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
559 u32 val;
560
561 val = readl_relaxed(addr);
562 index &= (GPI_DBG_LOG_SIZE - 1);
563 (gpii->dbg_log + index)->addr = addr;
564 (gpii->dbg_log + index)->time = time;
565 (gpii->dbg_log + index)->val = val;
566 (gpii->dbg_log + index)->read = true;
567 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
568 addr - gpii->regs, val);
569 return val;
570}
571static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
572{
573 u64 time = sched_clock();
574 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
575
576 index &= (GPI_DBG_LOG_SIZE - 1);
577 (gpii->dbg_log + index)->addr = addr;
578 (gpii->dbg_log + index)->time = time;
579 (gpii->dbg_log + index)->val = val;
580 (gpii->dbg_log + index)->read = false;
581
582 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
583 addr - gpii->regs, val);
584 writel_relaxed(val, addr);
585}
586#else
587static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
588{
589 u32 val = readl_relaxed(addr);
590
591 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
592 addr - gpii->regs, val);
593 return val;
594}
595static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
596{
597 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
598 addr - gpii->regs, val);
599 writel_relaxed(val, addr);
600}
601#endif
602
603/* gpi_write_reg_field - write to specific bit field */
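/*
 * Illustrative example: with mask = 0xF0 and shift = 4, writing val = 0x3
 * clears bits 7:4 of the current register value and sets that field to 0x3,
 * leaving all other bits untouched.
 */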
604static inline void gpi_write_reg_field(struct gpii *gpii,
605 void __iomem *addr,
606 u32 mask,
607 u32 shift,
608 u32 val)
609{
610 u32 tmp = gpi_read_reg(gpii, addr);
611
612 tmp &= ~mask;
613 val = tmp | ((val << shift) & mask);
614 gpi_write_reg(gpii, addr, val);
615}
616
617static void gpi_disable_interrupts(struct gpii *gpii)
618{
619 struct {
620 u32 offset;
621 u32 mask;
622 u32 shift;
623 u32 val;
624 } default_reg[] = {
625 {
626 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
627 (gpii->gpii_id),
628 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
629 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
630 0,
631 },
632 {
633 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
634 (gpii->gpii_id),
635 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
636 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
637 0,
638 },
639 {
640 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
641 (gpii->gpii_id),
642 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
643 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
644 0,
645 },
646 {
647 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
648 (gpii->gpii_id),
649 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
650 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
651 0,
652 },
653 {
654 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
655 (gpii->gpii_id),
656 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
657 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
658 0,
659 },
660 {
661 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
662 (gpii->gpii_id),
663 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
664 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
665 0,
666 },
667 {
668 GPI_GPII_n_CNTXT_INTSET_OFFS
669 (gpii->gpii_id),
670 GPI_GPII_n_CNTXT_INTSET_BMSK,
671 GPI_GPII_n_CNTXT_INTSET_SHFT,
672 0,
673 },
674 { 0 },
675 };
676 int i;
677
678 for (i = 0; default_reg[i].offset; i++)
679 gpi_write_reg_field(gpii, gpii->regs +
680 default_reg[i].offset,
681 default_reg[i].mask,
682 default_reg[i].shift,
683 default_reg[i].val);
684 gpii->cntxt_type_irq_msk = 0;
685 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
686 gpii->configured_irq = false;
687}
688
689/* configure and enable interrupts */
690static int gpi_config_interrupts(struct gpii *gpii,
691 enum gpii_irq_settings settings,
692 bool mask)
693{
694 int ret;
695 int i;
696 const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
697 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
698 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
699 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
700 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
701 struct {
702 u32 offset;
703 u32 mask;
704 u32 shift;
705 u32 val;
706 } default_reg[] = {
707 {
708 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
709 (gpii->gpii_id),
710 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
711 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
712 def_type,
713 },
714 {
715 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
716 (gpii->gpii_id),
717 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
718 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
719 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
720 },
721 {
722 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
723 (gpii->gpii_id),
724 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
725 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
726 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
727 },
728 {
729 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
730 (gpii->gpii_id),
731 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
732 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
733 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
734 },
735 {
736 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
737 (gpii->gpii_id),
738 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
739 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
740 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
741 },
742 {
743 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
744 (gpii->gpii_id),
745 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
746 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
747 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
748 },
749 {
750 GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
751 (gpii->gpii_id),
752 U32_MAX,
753 0,
754 0x0,
755 },
756 {
757 GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
758 (gpii->gpii_id),
759 U32_MAX,
760 0,
761 0x0,
762 },
763 {
764 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
765 (gpii->gpii_id),
766 U32_MAX,
767 0,
768 0x0,
769 },
770 {
771 GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
772 (gpii->gpii_id),
773 U32_MAX,
774 0,
775 0x0,
776 },
777 {
778 GPI_GPII_n_CNTXT_INTSET_OFFS
779 (gpii->gpii_id),
780 GPI_GPII_n_CNTXT_INTSET_BMSK,
781 GPI_GPII_n_CNTXT_INTSET_SHFT,
782 0x01,
783 },
784 {
785 GPI_GPII_n_ERROR_LOG_OFFS
786 (gpii->gpii_id),
787 U32_MAX,
788 0,
789 0x00,
790 },
791 { 0 },
792 };
793
794 GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
795 (gpii->configured_irq) ? 'F' : 'T',
796 (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
797 (mask) ? 'T' : 'F');
798
799 if (gpii->configured_irq == false) {
800 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
801 gpi_handle_irq, IRQF_TRIGGER_HIGH,
802 gpii->label, gpii);
803 if (ret < 0) {
804 GPII_CRITIC(gpii, GPI_DBG_COMMON,
805 "error request irq:%d ret:%d\n",
806 gpii->irq, ret);
807 return ret;
808 }
809 }
810
811 if (settings == MASK_IEOB_SETTINGS) {
812 /*
813 * GPII only uses one EV ring per gpii so we can globally
814 * enable/disable IEOB interrupt
815 */
816 if (mask)
817 gpii->cntxt_type_irq_msk |=
818 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
819 else
820 gpii->cntxt_type_irq_msk &=
821 ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
822 gpi_write_reg_field(gpii, gpii->regs +
823 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
824 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
825 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
826 gpii->cntxt_type_irq_msk);
827 } else {
828 for (i = 0; default_reg[i].offset; i++)
829 gpi_write_reg_field(gpii, gpii->regs +
830 default_reg[i].offset,
831 default_reg[i].mask,
832 default_reg[i].shift,
833 default_reg[i].val);
834 gpii->cntxt_type_irq_msk = def_type;
835 }
836
837 gpii->configured_irq = true;
838
839 return 0;
840}
841
842/* Sends gpii event or channel command */
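/*
 * The command completes asynchronously: the CH/EV control interrupt paths
 * update ch_state/ev_state and signal cmd_completion, which this function
 * waits on before validating that the expected state was reached.
 */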
843static int gpi_send_cmd(struct gpii *gpii,
844 struct gpii_chan *gpii_chan,
845 enum gpi_cmd gpi_cmd)
846{
847 u32 chid = MAX_CHANNELS_PER_GPII;
848 u32 cmd;
849 unsigned long timeout;
850 void __iomem *cmd_reg;
851
852 if (gpi_cmd >= GPI_MAX_CMD)
853 return -EINVAL;
854 if (IS_CHAN_CMD(gpi_cmd))
855 chid = gpii_chan->chid;
856
857 GPII_INFO(gpii, chid,
858 "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
859
860 /* send opcode and wait for completion */
861 reinit_completion(&gpii->cmd_completion);
862 gpii->gpi_cmd = gpi_cmd;
863
864 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
865 gpii->ev_cmd_reg;
866 cmd = IS_CHAN_CMD(gpi_cmd) ?
867 GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
868 GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
869 gpi_write_reg(gpii, cmd_reg, cmd);
870 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
871 msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
872
873 if (!timeout) {
874 GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
875 TO_GPI_CMD_STR(gpi_cmd));
876 return -EIO;
877 }
878
879 /* if the cmd changes state, confirm the new channel/ev state is correct */
880 if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
881 return 0;
882 if (IS_CHAN_CMD(gpi_cmd) &&
883 gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
884 return 0;
885 if (!IS_CHAN_CMD(gpi_cmd) &&
886 gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
887 return 0;
888
889 return -EIO;
890}
891
892/* program transfer ring DB register */
893static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
894 struct gpi_ring *ring,
895 void *wp)
896{
897 struct gpii *gpii = gpii_chan->gpii;
898 phys_addr_t p_wp;
899
900 p_wp = to_physical(ring, wp);
901 gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
902}
903
904/* program event ring DB register */
905static inline void gpi_write_ev_db(struct gpii *gpii,
906 struct gpi_ring *ring,
907 void *wp)
908{
909 phys_addr_t p_wp;
910
911 p_wp = ring->phys_addr + (wp - ring->base);
912 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
913}
914
915/* notify client with generic event */
916static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
917 enum msm_gpi_cb_event event,
918 u64 status)
919{
920 struct gpii *gpii = gpii_chan->gpii;
921 struct gpi_client_info *client_info = &gpii_chan->client_info;
922 struct msm_gpi_cb msm_gpi_cb = {0};
923
924 GPII_ERR(gpii, gpii_chan->chid,
925 "notifying event:%s with status:%llu\n",
926 TO_GPI_CB_EVENT_STR(event), status);
927
928 msm_gpi_cb.cb_event = event;
929 msm_gpi_cb.status = status;
930 msm_gpi_cb.timestamp = sched_clock();
931 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
932 client_info->cb_param);
933}
934
935/* process transfer completion interrupt */
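/*
 * Reads and clears the IEOB status, then either defers event processing to
 * the tasklet (masking IEOB until gpi_ev_tasklet() re-enables it) or, for
 * ISR-priority gpiis, processes the events directly in interrupt context.
 */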
936static void gpi_process_ieob(struct gpii *gpii)
937{
938 u32 ieob_irq;
939
940 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
941 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
942 GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
943
944 /* process events based on priority */
945 if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
946 GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
947 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
948 tasklet_schedule(&gpii->ev_task);
949 } else {
950 GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
951 gpi_process_events(gpii);
952 }
953}
954
955/* process channel control interrupt */
956static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
957{
958 u32 gpii_id = gpii->gpii_id;
959 u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
960 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
961 u32 chid;
962 struct gpii_chan *gpii_chan;
963 u32 state;
964
965 /* clear the status */
966 offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
967 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
968
969 for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
970 if (!(BIT(chid) & ch_irq))
971 continue;
972
973 gpii_chan = &gpii->gpii_chan[chid];
974 GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
975 state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
976 CNTXT_0_CONFIG);
977 state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
978 GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
979
980 /*
981 * The CH_CMD_DE_ALLOC command always succeeds, but it does not
982 * update the hardware state register, so overwrite the software
983 * state with the default state.
984 */
985 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
986 state = DEFAULT_CH_STATE;
987 gpii_chan->ch_state = state;
988 GPII_VERB(gpii, chid, "setting channel to state:%s\n",
989 TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
990
991 /*
992 * Trigger complete_all unless ch_state is STOP_IN_PROC; that is a
993 * transitional state, and we wait for the stop interrupt before
994 * notifying.
995 */
996 if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
997 complete_all(&gpii->cmd_completion);
998
999 /* notifying clients if in error state */
1000 if (gpii_chan->ch_state == CH_STATE_ERROR)
1001 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
1002 __LINE__);
1003 }
1004}
1005
1006/* processing gpi level error interrupts */
1007static void gpi_process_glob_err_irq(struct gpii *gpii)
1008{
1009 u32 gpii_id = gpii->gpii_id;
1010 u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
1011 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
1012 u32 error_log;
1013 u32 chid;
1014 struct gpii_chan *gpii_chan;
1015 struct gpi_client_info *client_info;
1016 struct msm_gpi_cb msm_gpi_cb;
1017 struct gpi_error_log_entry *log_entry =
1018 (struct gpi_error_log_entry *)&error_log;
1019
1020 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
1021 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
1022
1023 /* only error interrupt should be set */
1024 if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
1025 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
1026 irq_stts);
1027 goto error_irq;
1028 }
1029
1030 offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
1031 error_log = gpi_read_reg(gpii, gpii->regs + offset);
1032 gpi_write_reg(gpii, gpii->regs + offset, 0);
1033
1034 /* get channel info */
1035 chid = ((struct gpi_error_log_entry *)&error_log)->chid;
1036 if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
1037 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
1038 chid);
1039 goto error_irq;
1040 }
1041
1042 gpii_chan = &gpii->gpii_chan[chid];
1043 client_info = &gpii_chan->client_info;
1044
1045 /* notify client with error log */
1046 msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
1047 msm_gpi_cb.error_log.routine = log_entry->routine;
1048 msm_gpi_cb.error_log.type = log_entry->type;
1049 msm_gpi_cb.error_log.error_code = log_entry->code;
1050 GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
1051 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1052 GPII_ERR(gpii, gpii_chan->chid,
1053 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
1054 log_entry->ee, log_entry->chtype,
1055 msm_gpi_cb.error_log.routine,
1056 msm_gpi_cb.error_log.type,
1057 msm_gpi_cb.error_log.error_code);
1058 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1059 client_info->cb_param);
1060
1061 return;
1062
1063error_irq:
1064 for (chid = 0, gpii_chan = gpii->gpii_chan;
1065 chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
1066 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
1067 irq_stts);
1068}
1069
1070/* gpii interrupt handler */
1071static irqreturn_t gpi_handle_irq(int irq, void *data)
1072{
1073 struct gpii *gpii = data;
1074 u32 type;
1075 unsigned long flags;
1076 u32 offset;
1077 u32 gpii_id = gpii->gpii_id;
1078
1079 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1080
1081 read_lock_irqsave(&gpii->pm_lock, flags);
1082
1083 /*
1084 * An interrupt while the software state does not allow register
1085 * access (e.g. DISABLE state) means states are out of sync; bail out.
1086 */
1087 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1088 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1089 "receive interrupt while in %s state\n",
1090 TO_GPI_PM_STR(gpii->pm_state));
1091 goto exit_irq;
1092 }
1093
1094 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1095 type = gpi_read_reg(gpii, gpii->regs + offset);
1096
1097 do {
1098 GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
1099 type);
1100 /* global gpii error */
1101 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
1102 GPII_ERR(gpii, GPI_DBG_COMMON,
1103 "processing global error irq\n");
1104 gpi_process_glob_err_irq(gpii);
1105 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
1106 }
1107
1108 /* event control irq */
1109 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
1110 u32 ev_state;
1111 u32 ev_ch_irq;
1112
1113 GPII_INFO(gpii, GPI_DBG_COMMON,
1114 "processing EV CTRL interrupt\n");
1115 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
1116 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
1117
1118 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
1119 (gpii_id);
1120 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
1121 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
1122 CNTXT_0_CONFIG);
1123 ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
1124 ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
1125
1126 /*
1127 * The EV_CMD_DEALLOC command is always successful, but it does
1128 * not change the hardware state, so overwrite the software
1129 * state with the default state.
1130 */
1131 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
1132 ev_state = DEFAULT_EV_CH_STATE;
1133
1134 gpii->ev_state = ev_state;
1135 GPII_INFO(gpii, GPI_DBG_COMMON,
1136 "setting EV state to %s\n",
1137 TO_GPI_EV_STATE_STR(gpii->ev_state));
1138 complete_all(&gpii->cmd_completion);
1139 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
1140 }
1141
1142 /* channel control irq */
1143 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
1144 GPII_INFO(gpii, GPI_DBG_COMMON,
1145 "process CH CTRL interrupts\n");
1146 gpi_process_ch_ctrl_irq(gpii);
1147 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
1148 }
1149
1150 /* transfer complete interrupt */
1151 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
1152 GPII_VERB(gpii, GPI_DBG_COMMON,
1153 "process IEOB interrupts\n");
1154 gpi_process_ieob(gpii);
1155 type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
1156 }
1157
1158 if (type) {
1159 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1160 "Unhandled interrupt status:0x%x\n", type);
1161 goto exit_irq;
1162 }
1163 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1164 type = gpi_read_reg(gpii, gpii->regs + offset);
1165 } while (type);
1166
1167exit_irq:
1168 read_unlock_irqrestore(&gpii->pm_lock, flags);
1169 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1170
1171 return IRQ_HANDLED;
1172}
1173
1174/* process qup notification events */
1175static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
1176 struct qup_notif_event *notif_event)
1177{
1178 struct gpii *gpii = gpii_chan->gpii;
1179 struct gpi_client_info *client_info = &gpii_chan->client_info;
1180 struct msm_gpi_cb msm_gpi_cb;
1181
1182 GPII_VERB(gpii, gpii_chan->chid,
1183 "status:0x%x time:0x%x count:0x%x\n",
1184 notif_event->status, notif_event->time, notif_event->count);
1185
1186 msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
1187 msm_gpi_cb.status = notif_event->status;
1188 msm_gpi_cb.timestamp = notif_event->time;
1189 msm_gpi_cb.count = notif_event->count;
1190 GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
1191 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1192 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1193 client_info->cb_param);
1194}
1195
1196/* process DMA Immediate completion data events */
1197static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
1198 struct immediate_data_event *imed_event)
1199{
1200 struct gpii *gpii = gpii_chan->gpii;
1201 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1202 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1203 struct virt_dma_desc *vd;
1204 struct gpi_desc *gpi_desc;
1205 struct msm_gpi_tre *client_tre;
1206 void *sg_tre;
1207 void *tre = ch_ring->base +
1208 (ch_ring->el_size * imed_event->tre_index);
1209 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1210 unsigned long flags;
1211
1212 /*
1213 * If channel not active don't process event but let
1214 * client know pending event is available
1215 */
1216 if (gpii_chan->pm_state != ACTIVE_STATE) {
1217 GPII_ERR(gpii, gpii_chan->chid,
1218 "skipping processing event because ch @ %s state\n",
1219 TO_GPI_PM_STR(gpii_chan->pm_state));
1220 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1221 __LINE__);
1222 return;
1223 }
1224
1225 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1226 vd = vchan_next_desc(&gpii_chan->vc);
1227 if (!vd) {
1228 struct gpi_ere *gpi_ere;
1229 struct msm_gpi_tre *gpi_tre;
1230
1231 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1232 GPII_ERR(gpii, gpii_chan->chid,
1233 "event without a pending descriptor!\n");
1234 gpi_ere = (struct gpi_ere *)imed_event;
1235 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1236 gpi_ere->dword[0], gpi_ere->dword[1],
1237 gpi_ere->dword[2], gpi_ere->dword[3]);
1238 gpi_tre = tre;
1239 GPII_ERR(gpii, gpii_chan->chid,
1240 "Pending TRE: %08x %08x %08x %08x\n",
1241 gpi_tre->dword[0], gpi_tre->dword[1],
1242 gpi_tre->dword[2], gpi_tre->dword[3]);
1243 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1244 __LINE__);
1245 return;
1246 }
1247 gpi_desc = to_gpi_desc(vd);
1248
1249 /* the TRE the event points to does not match the descriptor's TRE */
1250 if (gpi_desc->wp != tre) {
1251 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1252 GPII_ERR(gpii, gpii_chan->chid,
1253 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1254 to_physical(ch_ring, gpi_desc->wp),
1255 to_physical(ch_ring, tre));
1256 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1257 __LINE__);
1258 return;
1259 }
1260
1261 list_del(&vd->node);
1262 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1263
1264 sg_tre = gpi_desc->sg_tre;
1265 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1266
1267 /*
1268 * The event's RP points at the last TRE processed,
1269 * so advance the local ring rp to tre + 1
1270 */
1271 tre += ch_ring->el_size;
1272 if (tre >= (ch_ring->base + ch_ring->len))
1273 tre = ch_ring->base;
1274 ch_ring->rp = tre;
1275 sg_tre += sg_ring->el_size;
1276 if (sg_tre >= (sg_ring->base + sg_ring->len))
1277 sg_tre = sg_ring->base;
1278 sg_ring->rp = sg_tre;
1279
1280 /* make sure rp updates are immediately visible to all cores */
1281 smp_wmb();
1282
1283 /* update Immediate data from Event back in to TRE if it's RX channel */
1284 if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
1285 client_tre->dword[0] =
1286 ((struct msm_gpi_tre *)imed_event)->dword[0];
1287 client_tre->dword[1] =
1288 ((struct msm_gpi_tre *)imed_event)->dword[1];
1289 client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
1290 imed_event->length);
1291 }
1292
1293 tx_cb_param = vd->tx.callback_param;
1294 if (tx_cb_param) {
1295 GPII_VERB(gpii, gpii_chan->chid,
1296 "cb_length:%u compl_code:0x%x status:0x%x\n",
1297 imed_event->length, imed_event->code,
1298 imed_event->status);
1299 tx_cb_param->length = imed_event->length;
1300 tx_cb_param->completion_code = imed_event->code;
1301 tx_cb_param->status = imed_event->status;
1302 }
1303
1304 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1305 vchan_cookie_complete(vd);
1306 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1307}
1308
1309/* processing transfer completion events */
1310static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
1311 struct xfer_compl_event *compl_event)
1312{
1313 struct gpii *gpii = gpii_chan->gpii;
1314 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1315 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1316 void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
1317 struct msm_gpi_tre *client_tre;
1318 struct virt_dma_desc *vd;
1319 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1320 struct gpi_desc *gpi_desc;
1321 void *sg_tre = NULL;
1322 unsigned long flags;
1323
1324 /* only process events on active channel */
1325 if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
1326 GPII_ERR(gpii, gpii_chan->chid,
1327 "skipping processing event because ch @ %s state\n",
1328 TO_GPI_PM_STR(gpii_chan->pm_state));
1329 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1330 __LINE__);
1331 return;
1332 }
1333
1334 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1335 vd = vchan_next_desc(&gpii_chan->vc);
1336 if (!vd) {
1337 struct gpi_ere *gpi_ere;
1338
1339 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1340 GPII_ERR(gpii, gpii_chan->chid,
1341 "Event without a pending descriptor!\n");
1342 gpi_ere = (struct gpi_ere *)compl_event;
1343 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1344 gpi_ere->dword[0], gpi_ere->dword[1],
1345 gpi_ere->dword[2], gpi_ere->dword[3]);
1346 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1347 __LINE__);
1348 return;
1349 }
1350
1351 gpi_desc = to_gpi_desc(vd);
1352
1353 /* the TRE the event was generated for doesn't match the descriptor's TRE */
1354 if (gpi_desc->wp != ev_rp) {
1355 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1356 GPII_ERR(gpii, gpii_chan->chid,
1357 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1358 to_physical(ch_ring, gpi_desc->wp),
1359 to_physical(ch_ring, ev_rp));
1360 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1361 __LINE__);
1362 return;
1363 }
1364
1365 list_del(&vd->node);
1366 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1367
1368 sg_tre = gpi_desc->sg_tre;
1369 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1370
1371 /*
1372 * The event's RP points at the last TRE processed,
1373 * so advance the local ring rp to ev_rp + 1
1374 */
1375 ev_rp += ch_ring->el_size;
1376 if (ev_rp >= (ch_ring->base + ch_ring->len))
1377 ev_rp = ch_ring->base;
1378 ch_ring->rp = ev_rp;
1379 sg_tre += sg_ring->el_size;
1380 if (sg_tre >= (sg_ring->base + sg_ring->len))
1381 sg_tre = sg_ring->base;
1382 sg_ring->rp = sg_tre;
1383
1384 /* update must be visible to other cores */
1385 smp_wmb();
1386
1387 tx_cb_param = vd->tx.callback_param;
1388 if (tx_cb_param) {
1389 GPII_VERB(gpii, gpii_chan->chid,
1390 "cb_length:%u compl_code:0x%x status:0x%x\n",
1391 compl_event->length, compl_event->code,
1392 compl_event->status);
1393 tx_cb_param->length = compl_event->length;
1394 tx_cb_param->completion_code = compl_event->code;
1395 tx_cb_param->status = compl_event->status;
1396 }
1397
1398 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1399 vchan_cookie_complete(vd);
1400 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1401}
1402
1403/* process all events */
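/*
 * Hardware advances the event ring context RP as events are written; this
 * loop consumes events until the local rp catches up, recycles each ring
 * element, rings the event doorbell, and then re-reads the context RP to
 * pick up any events that arrived while processing.
 */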
1404static void gpi_process_events(struct gpii *gpii)
1405{
1406 struct gpi_ring *ev_ring = &gpii->ev_ring;
1407 u32 cntxt_rp, local_rp;
1408 union gpi_event *gpi_event;
1409 struct gpii_chan *gpii_chan;
1410 u32 chid, type;
1411 u32 ieob_irq;
1412
1413 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1414 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1415
1416 GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp: 0x08%x local_rp:0x08%x\n",
1417 cntxt_rp, local_rp);
1418
1419 do {
1420 while (local_rp != cntxt_rp) {
1421 gpi_event = ev_ring->rp;
1422 chid = gpi_event->xfer_compl_event.chid;
1423 type = gpi_event->xfer_compl_event.type;
1424 GPII_VERB(gpii, GPI_DBG_COMMON,
1425 "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
1426 local_rp, chid, type,
1427 gpi_event->gpi_ere.dword[0],
1428 gpi_event->gpi_ere.dword[1],
1429 gpi_event->gpi_ere.dword[2],
1430 gpi_event->gpi_ere.dword[3]);
1431
1432 switch (type) {
1433 case XFER_COMPLETE_EV_TYPE:
1434 gpii_chan = &gpii->gpii_chan[chid];
1435 gpi_process_xfer_compl_event(gpii_chan,
1436 &gpi_event->xfer_compl_event);
1437 break;
1438 case STALE_EV_TYPE:
1439 GPII_VERB(gpii, GPI_DBG_COMMON,
1440 "stale event, not processing\n");
1441 break;
1442 case IMMEDIATE_DATA_EV_TYPE:
1443 gpii_chan = &gpii->gpii_chan[chid];
1444 gpi_process_imed_data_event(gpii_chan,
1445 &gpi_event->immediate_data_event);
1446 break;
1447 case QUP_NOTIF_EV_TYPE:
1448 gpii_chan = &gpii->gpii_chan[chid];
1449 gpi_process_qup_notif_event(gpii_chan,
1450 &gpi_event->qup_notif_event);
1451 break;
1452 default:
1453 GPII_VERB(gpii, GPI_DBG_COMMON,
1454 "not supported event type:0x%x\n",
1455 type);
1456 }
1457 gpi_ring_recycle_ev_element(ev_ring);
1458 local_rp = (u32)to_physical(ev_ring,
1459 (void *)ev_ring->rp);
1460 }
1461 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1462
1463 /* clear pending IEOB events */
1464 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
1465 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
1466
1467 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1468 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1469
1470 } while (cntxt_rp != local_rp);
1471
1472 GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
1473 local_rp);
1474}
1475
1476/* processing events using tasklet */
1477static void gpi_ev_tasklet(unsigned long data)
1478{
1479 struct gpii *gpii = (struct gpii *)data;
1480
1481 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1482
1483 read_lock_bh(&gpii->pm_lock);
1484 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1485 read_unlock_bh(&gpii->pm_lock);
1486 GPII_ERR(gpii, GPI_DBG_COMMON,
1487 "not processing any events, pm_state:%s\n",
1488 TO_GPI_PM_STR(gpii->pm_state));
1489 return;
1490 }
1491
1492 /* process the events */
1493 gpi_process_events(gpii);
1494
1495 /* enable IEOB, switching back to interrupts */
1496 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1497 read_unlock_bh(&gpii->pm_lock);
1498
1499 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1500}
1501
1502/* marks all pending events for the channel as stale */
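/*
 * Called (under pm_lock) before a channel reset: any events already written
 * by hardware for this channel but not yet consumed are retagged as
 * STALE_EV_TYPE so gpi_process_events() will skip them.
 */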
1503void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
1504{
1505 struct gpii *gpii = gpii_chan->gpii;
1506 struct gpi_ring *ev_ring = &gpii->ev_ring;
1507 void *ev_rp;
1508 u32 cntxt_rp, local_rp;
1509
1510 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1511 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1512
1513 ev_rp = ev_ring->rp;
1514 local_rp = (u32)to_physical(ev_ring, ev_rp);
1515 while (local_rp != cntxt_rp) {
1516 union gpi_event *gpi_event = ev_rp;
1517 u32 chid = gpi_event->xfer_compl_event.chid;
1518
1519 if (chid == gpii_chan->chid)
1520 gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1521 ev_rp += ev_ring->el_size;
1522 if (ev_rp >= (ev_ring->base + ev_ring->len))
1523 ev_rp = ev_ring->base;
1524 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1525 local_rp = (u32)to_physical(ev_ring, ev_rp);
1526 }
1527}
1528
1529/* reset sw state and issue channel reset or de-alloc */
1530static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
1531{
1532 struct gpii *gpii = gpii_chan->gpii;
1533 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1534 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1535 unsigned long flags;
1536 LIST_HEAD(list);
1537 int ret;
1538
1539 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1540 ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
1541 if (ret) {
1542 GPII_ERR(gpii, gpii_chan->chid,
1543 "Error with cmd:%s ret:%d\n",
1544 TO_GPI_CMD_STR(gpi_cmd), ret);
1545 return ret;
1546 }
1547
1548 /* initialize the local ring ptrs */
1549 ch_ring->rp = ch_ring->base;
1550 ch_ring->wp = ch_ring->base;
1551 sg_ring->rp = sg_ring->base;
1552 sg_ring->wp = sg_ring->base;
1553
1554 /* visible to other cores */
1555 smp_wmb();
1556
1557 /* check event ring for any stale events */
1558 write_lock_irq(&gpii->pm_lock);
1559 gpi_mark_stale_events(gpii_chan);
1560
1561 /* remove all async descriptors */
1562 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1563 vchan_get_all_descriptors(&gpii_chan->vc, &list);
1564 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1565 write_unlock_irq(&gpii->pm_lock);
1566 vchan_dma_desc_free_list(&gpii_chan->vc, &list);
1567
1568 return 0;
1569}
1570
1571static int gpi_start_chan(struct gpii_chan *gpii_chan)
1572{
1573 struct gpii *gpii = gpii_chan->gpii;
1574 int ret;
1575
1576 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1577
1578 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
1579 if (ret) {
1580 GPII_ERR(gpii, gpii_chan->chid,
1581 "Error with cmd:%s ret:%d\n",
1582 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1583 return ret;
1584 }
1585
1586 /* gpii CH is active now */
1587 write_lock_irq(&gpii->pm_lock);
1588 gpii_chan->pm_state = ACTIVE_STATE;
1589 write_unlock_irq(&gpii->pm_lock);
1590
1591 return 0;
1592}
1593
1594/* allocate and configure the transfer channel */
1595static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
1596{
1597 struct gpii *gpii = gpii_chan->gpii;
1598 struct gpi_ring *ring = &gpii_chan->ch_ring;
1599 int i;
1600 int ret;
1601 struct {
1602 void *base;
1603 int offset;
1604 u32 val;
1605 } ch_reg[] = {
1606 {
1607 gpii_chan->ch_cntxt_base_reg,
1608 CNTXT_0_CONFIG,
1609 GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
1610 gpii_chan->dir,
1611 GPI_CHTYPE_PROTO_GPI),
1612 },
1613 {
1614 gpii_chan->ch_cntxt_base_reg,
1615 CNTXT_1_R_LENGTH,
1616 ring->len,
1617 },
1618 {
1619 gpii_chan->ch_cntxt_base_reg,
1620 CNTXT_2_RING_BASE_LSB,
1621 (u32)ring->phys_addr,
1622 },
1623 {
1624 gpii_chan->ch_cntxt_base_reg,
1625 CNTXT_3_RING_BASE_MSB,
1626 (u32)(ring->phys_addr >> 32),
1627 },
1628 { /* program MSB of DB register with ring base */
1629 gpii_chan->ch_cntxt_db_reg,
1630 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1631 (u32)(ring->phys_addr >> 32),
1632 },
1633 {
1634 gpii->regs,
1635 GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
1636 gpii_chan->chid),
1637 GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
1638 gpii_chan->protocol,
1639 gpii_chan->seid),
1640 },
1641 {
1642 gpii->regs,
1643 GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
1644 gpii_chan->chid),
1645 0,
1646 },
1647 {
1648 gpii->regs,
1649 GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
1650 gpii_chan->chid),
1651 0,
1652 },
1653 {
1654 gpii->regs,
1655 GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
1656 gpii_chan->chid),
1657 0,
1658 },
1659 {
1660 gpii->regs,
1661 GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
1662 gpii_chan->chid),
1663 1,
1664 },
1665 { NULL },
1666 };
1667
1668 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1669
1670 if (send_alloc_cmd) {
1671 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
1672 if (ret) {
1673 GPII_ERR(gpii, gpii_chan->chid,
1674 "Error with cmd:%s ret:%d\n",
1675 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1676 return ret;
1677 }
1678 }
1679
1680 /* program channel cntxt registers */
1681 for (i = 0; ch_reg[i].base; i++)
1682 gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
1683 ch_reg[i].val);
1684 /* flush all the writes */
1685 wmb();
1686 return 0;
1687}
1688
1689/* allocate and configure event ring */
1690static int gpi_alloc_ev_chan(struct gpii *gpii)
1691{
1692 struct gpi_ring *ring = &gpii->ev_ring;
1693 int i;
1694 int ret;
1695 struct {
1696 void *base;
1697 int offset;
1698 u32 val;
1699 } ev_reg[] = {
1700 {
1701 gpii->ev_cntxt_base_reg,
1702 CNTXT_0_CONFIG,
1703 GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
1704 GPI_INTTYPE_IRQ,
1705 GPI_CHTYPE_GPI_EV),
1706 },
1707 {
1708 gpii->ev_cntxt_base_reg,
1709 CNTXT_1_R_LENGTH,
1710 ring->len,
1711 },
1712 {
1713 gpii->ev_cntxt_base_reg,
1714 CNTXT_2_RING_BASE_LSB,
1715 (u32)ring->phys_addr,
1716 },
1717 {
1718 gpii->ev_cntxt_base_reg,
1719 CNTXT_3_RING_BASE_MSB,
1720 (u32)(ring->phys_addr >> 32),
1721 },
1722 {
1723 /* program db msg with ring base msb */
1724 gpii->ev_cntxt_db_reg,
1725 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1726 (u32)(ring->phys_addr >> 32),
1727 },
1728 {
1729 gpii->ev_cntxt_base_reg,
1730 CNTXT_8_RING_INT_MOD,
1731 0,
1732 },
1733 {
1734 gpii->ev_cntxt_base_reg,
1735 CNTXT_10_RING_MSI_LSB,
1736 0,
1737 },
1738 {
1739 gpii->ev_cntxt_base_reg,
1740 CNTXT_11_RING_MSI_MSB,
1741 0,
1742 },
1743 {
1744 gpii->ev_cntxt_base_reg,
1745 CNTXT_8_RING_INT_MOD,
1746 0,
1747 },
1748 {
1749 gpii->ev_cntxt_base_reg,
1750 CNTXT_12_RING_RP_UPDATE_LSB,
1751 0,
1752 },
1753 {
1754 gpii->ev_cntxt_base_reg,
1755 CNTXT_13_RING_RP_UPDATE_MSB,
1756 0,
1757 },
1758 { NULL },
1759 };
1760
1761 GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
1762
1763 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1764 if (ret) {
1765 GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
1766 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1767 return ret;
1768 }
1769
1770 /* program event context */
1771 for (i = 0; ev_reg[i].base; i++)
1772 gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
1773 ev_reg[i].val);
1774
1775 /* add events to ring */
1776 ring->wp = (ring->base + ring->len - ring->el_size);
1777
1778 /* flush all the writes */
1779 wmb();
1780
1781 /* gpii is active now */
1782 write_lock_irq(&gpii->pm_lock);
1783 gpii->pm_state = ACTIVE_STATE;
1784 write_unlock_irq(&gpii->pm_lock);
1785 gpi_write_ev_db(gpii, ring, ring->wp);
1786
1787 return 0;
1788}
1789
1790/* calculate # of ERE/TRE available to queue */
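/*
 * One slot is always kept empty so a full ring can be told apart from an
 * empty one. Illustrative example: a 4-element ring with rp at element 1
 * and wp at element 3 has (1) + (1) - 1 = 1 element available.
 */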
1791static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1792{
1793 int elements = 0;
1794
1795 if (ring->wp < ring->rp)
1796 elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1797 else {
1798 elements = (ring->rp - ring->base) / ring->el_size;
1799 elements += ((ring->base + ring->len - ring->wp) /
1800 ring->el_size) - 1;
1801 }
1802
1803 return elements;
1804}
1805
1806static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1807{
1808
1809 if (gpi_ring_num_elements_avail(ring) <= 0)
1810 return -ENOMEM;
1811
1812 *wp = ring->wp;
1813 ring->wp += ring->el_size;
1814 if (ring->wp >= (ring->base + ring->len))
1815 ring->wp = ring->base;
1816
1817 /* visible to other cores */
1818 smp_wmb();
1819
1820 return 0;
1821}
1822
1823static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1824{
1825 /* Update the WP */
1826 ring->wp += ring->el_size;
1827 if (ring->wp >= (ring->base + ring->len))
1828 ring->wp = ring->base;
1829
1830 /* Update the RP */
1831 ring->rp += ring->el_size;
1832 if (ring->rp >= (ring->base + ring->len))
1833 ring->rp = ring->base;
1834
1835 /* visible to other cores */
1836 smp_wmb();
1837}
1838
1839static void gpi_free_ring(struct gpi_ring *ring,
1840 struct gpii *gpii)
1841{
1842 if (ring->dma_handle)
1843 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1844 ring->pre_aligned, ring->dma_handle);
1845 else
1846 vfree(ring->pre_aligned);
1847 memset(ring, 0, sizeof(*ring));
1848}
1849
1850/* allocate memory for transfer and event rings */
1851static int gpi_alloc_ring(struct gpi_ring *ring,
1852 u32 elements,
1853 u32 el_size,
1854 struct gpii *gpii,
1855 bool alloc_coherent)
1856{
1857 u64 len = elements * el_size;
1858 int bit;
1859
1860 if (alloc_coherent) {
1861 /* ring len must be power of 2 */
1862 bit = find_last_bit((unsigned long *)&len, 32);
1863 if (((1 << bit) - 1) & len)
1864 bit++;
1865 len = 1 << bit;
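		/*
		 * e.g. len = 96 (0b1100000): the last set bit is 6, and
		 * 96 & 0x3f is non-zero, so bit becomes 7 and len is
		 * rounded up to 128.
		 */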
1866 ring->alloc_size = (len + (len - 1));
1867 GPII_INFO(gpii, GPI_DBG_COMMON,
1868 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
1869 elements, el_size, (elements * el_size), len,
1870 ring->alloc_size);
1871 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1872 ring->alloc_size,
1873 &ring->dma_handle,
1874 GFP_KERNEL);
1875 if (!ring->pre_aligned) {
1876 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1877 "could not alloc size:%lu mem for ring\n",
1878 ring->alloc_size);
1879 return -ENOMEM;
1880 }
1881
1882 /* align the physical mem */
1883 ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1884 ring->base = ring->pre_aligned +
1885 (ring->phys_addr - ring->dma_handle);
1886 } else {
1887 ring->pre_aligned = vmalloc(len);
1888 if (!ring->pre_aligned) {
1889 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1890 "could not allocsize:%llu mem for ring\n",
1891 len);
1892 return -ENOMEM;
1893 }
1894 ring->phys_addr = 0;
1895 ring->dma_handle = 0;
1896 ring->base = ring->pre_aligned;
1897 }
1898
1899 ring->rp = ring->base;
1900 ring->wp = ring->base;
1901 ring->len = len;
1902 ring->el_size = el_size;
1903 ring->elements = ring->len / ring->el_size;
1904 memset(ring->base, 0, ring->len);
1905 ring->configured = true;
1906
1907 /* update to other cores */
1908 smp_wmb();
1909
1910 GPII_INFO(gpii, GPI_DBG_COMMON,
1911 "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
1912 ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
1913 ring->elements);
1914
1915 return 0;
1916}
1917
1918/* copy tre into transfer ring */
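/*
 * Copies the client's TRE into the hardware transfer ring and records, in
 * the parallel sg_ring, both the client TRE pointer and the hardware slot
 * it was copied to, so completion handling can map events back to the
 * client descriptor.
 */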
1919static void gpi_queue_xfer(struct gpii *gpii,
1920 struct gpii_chan *gpii_chan,
1921 struct msm_gpi_tre *gpi_tre,
1922 void **wp,
1923 struct sg_tre **sg_tre)
1924{
1925 struct msm_gpi_tre *ch_tre;
1926 int ret;
1927
1928 /* get next tre location we can copy */
1929 ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
1930 if (unlikely(ret)) {
1931 GPII_CRITIC(gpii, gpii_chan->chid,
1932 "Error adding ring element to xfer ring\n");
1933 return;
1934 }
1935 /* get next sg tre location we can use */
1936 ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
1937 if (unlikely(ret)) {
1938 GPII_CRITIC(gpii, gpii_chan->chid,
1939 "Error adding ring element to sg ring\n");
1940 return;
1941 }
1942
1943 /* copy the tre info */
1944 memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
1945 (*sg_tre)->ptr = gpi_tre;
1946 (*sg_tre)->wp = ch_tre;
1947 *wp = ch_tre;
1948}
1949
1950/* reset and restart transfer channel */
1951int gpi_terminate_all(struct dma_chan *chan)
1952{
1953 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1954 struct gpii *gpii = gpii_chan->gpii;
1955 int schid, echid, i;
1956 int ret = 0;
1957
1958 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1959 mutex_lock(&gpii->ctrl_lock);
1960
1961 /*
1962	 * treat both channels as a group if the protocol is not UART;
1963	 * STOP, RESET, and START need to be issued in lockstep
1964 */
1965 schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
1966 echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
1967 MAX_CHANNELS_PER_GPII;
1968
1969 /* stop the channel */
1970 for (i = schid; i < echid; i++) {
1971 gpii_chan = &gpii->gpii_chan[i];
1972
1973 /* disable ch state so no more TRE processing */
1974 write_lock_irq(&gpii->pm_lock);
1975 gpii_chan->pm_state = PREPARE_TERMINATE;
1976 write_unlock_irq(&gpii->pm_lock);
1977
1978 /* send command to Stop the channel */
1979 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
1980 if (ret)
1981 GPII_ERR(gpii, gpii_chan->chid,
1982 "Error Stopping Channel:%d resetting anyway\n",
1983 ret);
1984 }
1985
1986 /* reset the channels (clears any pending tre) */
1987 for (i = schid; i < echid; i++) {
1988 gpii_chan = &gpii->gpii_chan[i];
1989
1990 ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
1991 if (ret) {
1992 GPII_ERR(gpii, gpii_chan->chid,
1993 "Error resetting channel ret:%d\n", ret);
1994 goto terminate_exit;
1995 }
1996
1997 /* reprogram channel CNTXT */
1998 ret = gpi_alloc_chan(gpii_chan, false);
1999 if (ret) {
2000 GPII_ERR(gpii, gpii_chan->chid,
2001 "Error alloc_channel ret:%d\n", ret);
2002 goto terminate_exit;
2003 }
2004 }
2005
2006 /* restart the channels */
2007 for (i = schid; i < echid; i++) {
2008 gpii_chan = &gpii->gpii_chan[i];
2009
2010 ret = gpi_start_chan(gpii_chan);
2011 if (ret) {
2012 GPII_ERR(gpii, gpii_chan->chid,
2013 "Error Starting Channel ret:%d\n", ret);
2014 goto terminate_exit;
2015 }
2016 }
2017
2018terminate_exit:
2019 mutex_unlock(&gpii->ctrl_lock);
2020 return ret;
2021}
2022
2023/* pause dma transfer for all channels */
2024static int gpi_pause(struct dma_chan *chan)
2025{
2026 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2027 struct gpii *gpii = gpii_chan->gpii;
2028 int i, ret;
2029
2030 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
2031 mutex_lock(&gpii->ctrl_lock);
2032
2033 /*
2034 * pause/resume are per gpii not per channel, so
2035 * client needs to call pause only once
2036 */
2037 if (gpii->pm_state == PAUSE_STATE) {
2038 GPII_INFO(gpii, gpii_chan->chid,
2039 "channel is already paused\n");
2040 mutex_unlock(&gpii->ctrl_lock);
2041 return 0;
2042 }
2043
2044 /* send stop command to stop the channels */
2045 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2046 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2047 if (ret) {
2048 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2049 "Error stopping chan, ret:%d\n", ret);
2050 mutex_unlock(&gpii->ctrl_lock);
2051 return ret;
2052 }
2053 }
2054
2055 disable_irq(gpii->irq);
2056
2057	/* wait for the event tasklet to finish */
2058 tasklet_kill(&gpii->ev_task);
2059
2060 write_lock_irq(&gpii->pm_lock);
2061 gpii->pm_state = PAUSE_STATE;
2062 write_unlock_irq(&gpii->pm_lock);
2063 mutex_unlock(&gpii->ctrl_lock);
2064
2065 return 0;
2066}
2067
2068/* resume dma transfer */
2069static int gpi_resume(struct dma_chan *chan)
2070{
2071 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2072 struct gpii *gpii = gpii_chan->gpii;
2073 int i;
2074 int ret;
2075
2076 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2077
2078 mutex_lock(&gpii->ctrl_lock);
2079 if (gpii->pm_state == ACTIVE_STATE) {
2080 GPII_INFO(gpii, gpii_chan->chid,
2081 "channel is already active\n");
2082 mutex_unlock(&gpii->ctrl_lock);
2083 return 0;
2084 }
2085
2086 enable_irq(gpii->irq);
2087
2088 /* send start command to start the channels */
2089 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2090 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
2091 if (ret) {
2092 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2093 "Erro starting chan, ret:%d\n", ret);
2094 mutex_unlock(&gpii->ctrl_lock);
2095 return ret;
2096 }
2097 }
2098
2099 write_lock_irq(&gpii->pm_lock);
2100 gpii->pm_state = ACTIVE_STATE;
2101 write_unlock_irq(&gpii->pm_lock);
2102 mutex_unlock(&gpii->ctrl_lock);
2103
2104 return 0;
2105}
2106
2107void gpi_desc_free(struct virt_dma_desc *vd)
2108{
2109 struct gpi_desc *gpi_desc = to_gpi_desc(vd);
2110
2111 kfree(gpi_desc);
2112}
2113
2114/* prepare a descriptor: copy client TREs into the transfer ring */
2115struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
2116 struct scatterlist *sgl,
2117 unsigned int sg_len,
2118 enum dma_transfer_direction direction,
2119 unsigned long flags,
2120 void *context)
2121{
2122 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2123 struct gpii *gpii = gpii_chan->gpii;
2124 u32 nr, sg_nr;
2125 u32 nr_req = 0;
2126 int i, j;
2127 struct scatterlist *sg;
2128 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
2129 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
2130 void *tre, *wp = NULL;
2131 struct sg_tre *sg_tre = NULL;
2132 const gfp_t gfp = GFP_ATOMIC;
2133 struct gpi_desc *gpi_desc;
2134
2135 GPII_VERB(gpii, gpii_chan->chid, "enter\n");
2136
2137 if (!is_slave_direction(direction)) {
2138 GPII_ERR(gpii, gpii_chan->chid,
2139 "invalid dma direction: %d\n", direction);
2140 return NULL;
2141 }
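	/*
	 * Each scatterlist entry is expected to carry client-built TREs, so
	 * sg->length is a multiple of the ring element size and maps to
	 * sg->length / el_size transfer ring elements.
	 */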
2142
2143 /* calculate # of elements required & available */
2144 nr = gpi_ring_num_elements_avail(ch_ring);
2145 sg_nr = gpi_ring_num_elements_avail(sg_ring);
2146 for_each_sg(sgl, sg, sg_len, i) {
2147 GPII_VERB(gpii, gpii_chan->chid,
2148 "%d of %u len:%u\n", i, sg_len, sg->length);
2149 nr_req += (sg->length / ch_ring->el_size);
2150 }
2151 GPII_VERB(gpii, gpii_chan->chid,
2152 "nr_elements_avail:%u sg_avail:%u required:%u\n",
2153 nr, sg_nr, nr_req);
2154
2155 if (nr < nr_req || sg_nr < nr_req) {
2156 GPII_ERR(gpii, gpii_chan->chid,
2157 "not enough space in ring, avail:%u,%u required:%u\n",
2158 nr, sg_nr, nr_req);
2159 return NULL;
2160 }
2161
2162 gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
2163 if (!gpi_desc) {
2164 GPII_ERR(gpii, gpii_chan->chid,
2165 "out of memory for descriptor\n");
2166 return NULL;
2167 }
2168
2169 /* copy each tre into transfer ring */
2170 for_each_sg(sgl, sg, sg_len, i)
2171 for (j = 0, tre = sg_virt(sg); j < sg->length;
2172 j += ch_ring->el_size, tre += ch_ring->el_size)
2173 gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);
2174
2175 /* set up the descriptor */
2176 gpi_desc->db = ch_ring->wp;
2177 gpi_desc->wp = wp;
2178 gpi_desc->sg_tre = sg_tre;
2179 gpi_desc->gpii_chan = gpii_chan;
2180 GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
2181 to_physical(ch_ring, ch_ring->wp),
2182 to_physical(ch_ring, ch_ring->rp));
2183
2184 return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
2185}
2186
2187/* rings the transfer ring doorbell to begin the transfer */
2188static void gpi_issue_pending(struct dma_chan *chan)
2189{
2190 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2191 struct gpii *gpii = gpii_chan->gpii;
2192 unsigned long flags, pm_lock_flags;
2193 struct virt_dma_desc *vd = NULL;
2194 struct gpi_desc *gpi_desc;
2195
2196 GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
2197
2198 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
2199
2200	/* move all submitted descriptors to the issued list */
2201 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
2202 if (vchan_issue_pending(&gpii_chan->vc))
2203 vd = list_last_entry(&gpii_chan->vc.desc_issued,
2204 struct virt_dma_desc, node);
2205 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
2206
2207	/* nothing to do, list is empty */
2208 if (!vd) {
2209 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2210 GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
2211 return;
2212 }
2213
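	/*
	 * The doorbell of the most recently issued descriptor points past the
	 * newest TRE, so ringing it once covers every descriptor moved to the
	 * issued list above.
	 */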
2214 gpi_desc = to_gpi_desc(vd);
2215 gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
2216 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2217}
2218
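/*
 * Illustrative client usage (a sketch, not part of this driver): a GPI client
 * passes a struct msm_gpi_ctrl through chan->private and then calls
 * dmaengine_slave_config(), which lands here via device_config. The callback
 * names below are placeholders; only the fields read by this function are
 * shown.
 *
 *	struct msm_gpi_ctrl ctrl = {
 *		.cmd = MSM_GPI_INIT,
 *		.init.callback = client_event_cb,
 *		.init.cb_param = client_data,
 *	};
 *	struct dma_slave_config config = { };
 *
 *	chan->private = &ctrl;
 *	ret = dmaengine_slave_config(chan, &config);
 */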
2219/* configure or issue async command */
2220static int gpi_config(struct dma_chan *chan,
2221 struct dma_slave_config *config)
2222{
2223 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2224 struct gpii *gpii = gpii_chan->gpii;
2225 struct msm_gpi_ctrl *gpi_ctrl = chan->private;
2226 const int ev_factor = gpii->gpi_dev->ev_factor;
2227 u32 elements;
2228 int i = 0;
2229 int ret = 0;
2230
2231 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2232 if (!gpi_ctrl) {
2233 GPII_ERR(gpii, gpii_chan->chid,
2234 "no config ctrl data provided");
2235 return -EINVAL;
2236 }
2237
2238 mutex_lock(&gpii->ctrl_lock);
2239
2240 switch (gpi_ctrl->cmd) {
2241 case MSM_GPI_INIT:
2242 GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
2243
2244 gpii_chan->client_info.callback = gpi_ctrl->init.callback;
2245 gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
2246 gpii_chan->pm_state = CONFIG_STATE;
2247
2248		/* check if both channels are configured before continuing */
2249 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2250 if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
2251 goto exit_gpi_init;
2252
2253 /* configure to highest priority from two channels */
2254 gpii->ev_priority = min(gpii->gpii_chan[0].priority,
2255 gpii->gpii_chan[1].priority);
2256
2257 /* protocol must be same for both channels */
2258 if (gpii->gpii_chan[0].protocol !=
2259 gpii->gpii_chan[1].protocol) {
2260 GPII_ERR(gpii, gpii_chan->chid,
2261 "protocol did not match protocol %u != %u\n",
2262 gpii->gpii_chan[0].protocol,
2263 gpii->gpii_chan[1].protocol);
2264 ret = -EINVAL;
2265 goto exit_gpi_init;
2266 }
2267 gpii->protocol = gpii_chan->protocol;
2268
2269 /* allocate memory for event ring */
2270 elements = max(gpii->gpii_chan[0].req_tres,
2271 gpii->gpii_chan[1].req_tres);
2272 ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
2273 sizeof(union gpi_event), gpii, true);
2274 if (ret) {
2275 GPII_ERR(gpii, gpii_chan->chid,
2276 "error allocating mem for ev ring\n");
2277 goto exit_gpi_init;
2278 }
2279
2280 /* configure interrupts */
2281 write_lock_irq(&gpii->pm_lock);
2282 gpii->pm_state = PREPARE_HARDWARE;
2283 write_unlock_irq(&gpii->pm_lock);
2284 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
2285 if (ret) {
2286 GPII_ERR(gpii, gpii_chan->chid,
2287 "error config. interrupts, ret:%d\n", ret);
2288 goto error_config_int;
2289 }
2290
2291 /* allocate event rings */
2292 ret = gpi_alloc_ev_chan(gpii);
2293 if (ret) {
2294 GPII_ERR(gpii, gpii_chan->chid,
2295 "error alloc_ev_chan:%d\n", ret);
2296 goto error_alloc_ev_ring;
2297 }
2298
2299 /* Allocate all channels */
2300 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2301 ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
2302 if (ret) {
2303 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2304 "Error allocating chan:%d\n", ret);
2305 goto error_alloc_chan;
2306 }
2307 }
2308
2309 /* start channels */
2310 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2311 ret = gpi_start_chan(&gpii->gpii_chan[i]);
2312 if (ret) {
2313 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2314 "Error start chan:%d\n", ret);
2315 goto error_start_chan;
2316 }
2317 }
2318
2319 break;
2320 case MSM_GPI_CMD_UART_SW_STALE:
2321 GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
2322 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
2323 break;
2324 case MSM_GPI_CMD_UART_RFR_READY:
2325 GPII_INFO(gpii, gpii_chan->chid,
2326 "sending UART RFR READY cmd\n");
2327 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
2328 break;
2329 case MSM_GPI_CMD_UART_RFR_NOT_READY:
2330 GPII_INFO(gpii, gpii_chan->chid,
2331 "sending UART RFR READY NOT READY cmd\n");
2332 ret = gpi_send_cmd(gpii, gpii_chan,
2333 GPI_CH_CMD_UART_RFR_NOT_READY);
2334 break;
2335 default:
2336 GPII_ERR(gpii, gpii_chan->chid,
2337 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
2338 ret = -EINVAL;
2339 }
2340
2341 mutex_unlock(&gpii->ctrl_lock);
2342 return ret;
2343
2344error_start_chan:
2345	for (i = i - 1; i >= 0; i--) {
2346		gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2347		gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_RESET);
2348 }
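	/* fall through: de-allocate both channels in the unwind loop below */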
2349 i = 2;
2350error_alloc_chan:
2351 for (i = i - 1; i >= 0; i--)
2352		gpi_reset_chan(&gpii->gpii_chan[i], GPI_CH_CMD_DE_ALLOC);
2353error_alloc_ev_ring:
2354 gpi_disable_interrupts(gpii);
2355error_config_int:
2356 gpi_free_ring(&gpii->ev_ring, gpii);
2357exit_gpi_init:
2358 mutex_unlock(&gpii->ctrl_lock);
2359 return ret;
2360}
2361
2362/* release all channel resources */
2363static void gpi_free_chan_resources(struct dma_chan *chan)
2364{
2365 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2366 struct gpii *gpii = gpii_chan->gpii;
2367 enum gpi_pm_state cur_state;
2368 int ret, i;
2369
2370 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2371
2372 mutex_lock(&gpii->ctrl_lock);
2373
2374 cur_state = gpii_chan->pm_state;
2375
2376 /* disable ch state so no more TRE processing for this channel */
2377 write_lock_irq(&gpii->pm_lock);
2378 gpii_chan->pm_state = PREPARE_TERMINATE;
2379 write_unlock_irq(&gpii->pm_lock);
2380
2381	/* attempt to do graceful hardware shutdown */
2382 if (cur_state == ACTIVE_STATE) {
2383 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2384 if (ret)
2385 GPII_ERR(gpii, gpii_chan->chid,
2386 "error stopping channel:%d\n", ret);
2387
2388 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2389 if (ret)
2390 GPII_ERR(gpii, gpii_chan->chid,
2391 "error resetting channel:%d\n", ret);
2392
2393 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2394 }
2395
2396 /* free all allocated memory */
2397 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2398 gpi_free_ring(&gpii_chan->sg_ring, gpii);
2399 vchan_free_chan_resources(&gpii_chan->vc);
2400
2401 write_lock_irq(&gpii->pm_lock);
2402 gpii_chan->pm_state = DISABLE_STATE;
2403 write_unlock_irq(&gpii->pm_lock);
2404
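	/*
	 * The event ring, tasklet and interrupts are shared by both channels
	 * of this gpii, so they are only torn down once the last channel ring
	 * has been freed.
	 */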
2405	/* if other channel rings are still active, exit */
2406 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2407 if (gpii->gpii_chan[i].ch_ring.configured)
2408 goto exit_free;
2409
2410 GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
2411
2412 /* deallocate EV Ring */
2413 cur_state = gpii->pm_state;
2414 write_lock_irq(&gpii->pm_lock);
2415 gpii->pm_state = PREPARE_TERMINATE;
2416 write_unlock_irq(&gpii->pm_lock);
2417
2418	/* wait for the event tasklet to finish */
2419 tasklet_kill(&gpii->ev_task);
2420
2421	/* send command to de-allocate the event ring */
2422 if (cur_state == ACTIVE_STATE)
2423 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2424
2425 gpi_free_ring(&gpii->ev_ring, gpii);
2426
2427 /* disable interrupts */
2428 if (cur_state == ACTIVE_STATE)
2429 gpi_disable_interrupts(gpii);
2430
2431 /* set final state to disable */
2432 write_lock_irq(&gpii->pm_lock);
2433 gpii->pm_state = DISABLE_STATE;
2434 write_unlock_irq(&gpii->pm_lock);
2435
2436exit_free:
2437 mutex_unlock(&gpii->ctrl_lock);
2438}
2439
2440/* allocate channel resources */
2441static int gpi_alloc_chan_resources(struct dma_chan *chan)
2442{
2443 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2444 struct gpii *gpii = gpii_chan->gpii;
2445 int ret;
2446
2447 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2448
2449 mutex_lock(&gpii->ctrl_lock);
2450
2451 /* allocate memory for transfer ring */
2452 ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
2453 sizeof(struct msm_gpi_tre), gpii, true);
2454 if (ret) {
2455 GPII_ERR(gpii, gpii_chan->chid,
2456 "error allocating xfer ring, ret:%d\n", ret);
2457 goto xfer_alloc_err;
2458 }
2459
2460 ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
2461 sizeof(struct sg_tre), gpii, false);
2462 if (ret) {
2463 GPII_ERR(gpii, gpii_chan->chid,
2464 "error allocating sg ring, ret:%d\n", ret);
2465 goto sg_alloc_error;
2466 }
2467 mutex_unlock(&gpii->ctrl_lock);
2468
2469 return 0;
2470
2471sg_alloc_error:
2472 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2473xfer_alloc_err:
2474 mutex_unlock(&gpii->ctrl_lock);
2475
2476 return ret;
2477}
2478
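/*
 * Pick a gpii for the requested serial engine: reuse the gpii whose tx or rx
 * channel is already bound to this seid, otherwise return the first unused
 * gpii allowed by gpii_mask.
 */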
2479static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
2480{
2481 int gpii;
2482 struct gpii_chan *tx_chan, *rx_chan;
2483
2484 /* check if same seid is already configured for another chid */
2485 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2486 if (!((1 << gpii) & gpi_dev->gpii_mask))
2487 continue;
2488
2489 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2490 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2491
2492 if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
2493 return gpii;
2494 if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
2495 return gpii;
2496 }
2497
2498 /* no channels configured with same seid, return next avail gpii */
2499 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2500 if (!((1 << gpii) & gpi_dev->gpii_mask))
2501 continue;
2502
2503 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2504 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2505
2506 /* check if gpii is configured */
2507 if (tx_chan->vc.chan.client_count ||
2508 rx_chan->vc.chan.client_count)
2509 continue;
2510
2511 /* found a free gpii */
2512 return gpii;
2513 }
2514
2515 /* no gpii instance available to use */
2516 return -EIO;
2517}
2518
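/*
 * The dma-cells consumed here (an assumption based on what this function
 * reads, in order): channel id, serial engine id, protocol, transfer ring
 * size in TREs, and priority; REQ_OF_DMA_ARGS is the minimum cell count.
 */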
2519/* gpi_of_dma_xlate: open client requested channel */
2520static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2521 struct of_dma *of_dma)
2522{
2523 struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
2524	u32 seid, chid;
2525 int gpii;
2526	struct gpii_chan *gpii_chan;
2527
2528 if (args->args_count < REQ_OF_DMA_ARGS) {
2529 GPI_ERR(gpi_dev,
2530 "gpii require minimum 6 args, client passed:%d args\n",
2531 args->args_count);
2532 return NULL;
2533 }
2534
2535	chid = args->args[0];
2536	if (chid >= MAX_CHANNELS_PER_GPII) {
2537 GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
2538 return NULL;
2539 }
2540
2541	seid = args->args[1];
2542
2543 /* find next available gpii to use */
2544 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2545 if (gpii < 0) {
2546 GPI_ERR(gpi_dev, "no available gpii instances\n");
2547 return NULL;
2548 }
2549
2550	gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
2551	if (gpii_chan->vc.chan.client_count) {
2552 GPI_ERR(gpi_dev, "gpii:%d chid:%d seid:%d already configured\n",
2553 gpii, chid, gpii_chan->seid);
2554 return NULL;
2555 }
2556
2557 /* get ring size, protocol, se_id, and priority */
2558 gpii_chan->seid = seid;
2559 gpii_chan->protocol = args->args[2];
2560 gpii_chan->req_tres = args->args[3];
2561 gpii_chan->priority = args->args[4];
2562
2563 GPI_LOG(gpi_dev,
2564 "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
2565 gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
2566 gpii_chan->protocol);
2567
2568 return dma_get_slave_channel(&gpii_chan->vc.chan);
2569}
2570
2571/* gpi_setup_debug - setup debug capabilities */
2572static void gpi_setup_debug(struct gpi_dev *gpi_dev)
2573{
2574 char node_name[GPI_LABEL_SIZE];
2575 const umode_t mode = 0600;
2576 int i;
2577
2578 snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
2579 (u64)gpi_dev->res->start);
2580
2581 gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2582 node_name, 0);
2583 gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2584 if (!IS_ERR_OR_NULL(pdentry)) {
2585 snprintf(node_name, sizeof(node_name), "%llx",
2586 (u64)gpi_dev->res->start);
2587 gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
2588 if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
2589 debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
2590 &gpi_dev->ipc_log_lvl);
2591 debugfs_create_u32("klog_lvl", mode,
2592 gpi_dev->dentry, &gpi_dev->klog_lvl);
2593 }
2594 }
2595
2596 for (i = 0; i < gpi_dev->max_gpii; i++) {
2597 struct gpii *gpii;
2598
2599 if (!((1 << i) & gpi_dev->gpii_mask))
2600 continue;
2601
2602 gpii = &gpi_dev->gpiis[i];
2603 snprintf(gpii->label, sizeof(gpii->label),
2604 "%s%llx_gpii%d",
2605 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
2606 gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2607 gpii->label, 0);
2608 gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2609 gpii->klog_lvl = DEFAULT_KLOG_LVL;
2610
2611 if (IS_ERR_OR_NULL(gpi_dev->dentry))
2612 continue;
2613
2614 snprintf(node_name, sizeof(node_name), "gpii%d", i);
2615 gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
2616 if (IS_ERR_OR_NULL(gpii->dentry))
2617 continue;
2618
2619 debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
2620 &gpii->ipc_log_lvl);
2621 debugfs_create_u32("klog_lvl", mode, gpii->dentry,
2622 &gpii->klog_lvl);
2623 }
2624}
2625
2626static struct dma_iommu_mapping *gpi_create_mapping(struct gpi_dev *gpi_dev)
2627{
2628 dma_addr_t base;
2629 size_t size;
2630
2631 /*
2632	 * If S1_BYPASS is enabled the iommu address space is not used; however,
2633	 * the framework still requires clients to create a mapping before
2634	 * attaching. So set it to the smallest size the iommu framework allows.
2635 */
2636 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2637 base = 0;
2638 size = PAGE_SIZE;
2639 } else {
2640 base = gpi_dev->iova_base;
2641 size = gpi_dev->iova_size;
2642 }
2643
2644 GPI_LOG(gpi_dev, "Creating iommu mapping of base:0x%llx size:%lu\n",
2645 base, size);
2646
2647 return arm_iommu_create_mapping(&platform_bus_type, base, size);
2648}
2649
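/*
 * Derive the DMA mask: with SMMU S1 translation in use the mask is sized to
 * the top of the configured iova window, otherwise the full 64 bits are used.
 */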
2650static int gpi_dma_mask(struct gpi_dev *gpi_dev)
2651{
2652 int mask = 64;
2653
2654 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2655 unsigned long addr;
2656
2657 addr = gpi_dev->iova_base + gpi_dev->iova_size + 1;
2658 mask = find_last_bit(&addr, 64);
2659 }
2660
2661 GPI_LOG(gpi_dev, "Setting dma mask to %d\n", mask);
2662
2663 return dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(mask));
2664}
2665
2666static int gpi_smmu_init(struct gpi_dev *gpi_dev)
2667{
2668	struct dma_iommu_mapping *mapping = NULL;
2669 int ret;
2670
2671	if (gpi_dev->smmu_cfg) {
2672
2673 /* create mapping table */
2674 mapping = gpi_create_mapping(gpi_dev);
2675 if (IS_ERR(mapping)) {
2676 GPI_ERR(gpi_dev,
2677 "Failed to create iommu mapping, ret:%ld\n",
2678 PTR_ERR(mapping));
2679 return PTR_ERR(mapping);
2680 }
2681
2682 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2683 int s1_bypass = 1;
2684
2685 ret = iommu_domain_set_attr(mapping->domain,
2686 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
2687 if (ret) {
2688 GPI_ERR(gpi_dev,
2689 "Failed to set attr S1_BYPASS, ret:%d\n",
2690 ret);
2691 goto release_mapping;
2692 }
2693 }
2694
2695 if (gpi_dev->smmu_cfg & GPI_SMMU_FAST) {
2696 int fast = 1;
2697
2698 ret = iommu_domain_set_attr(mapping->domain,
2699 DOMAIN_ATTR_FAST, &fast);
2700 if (ret) {
2701 GPI_ERR(gpi_dev,
2702 "Failed to set attr FAST, ret:%d\n",
2703 ret);
2704 goto release_mapping;
2705 }
2706 }
2707
2708 if (gpi_dev->smmu_cfg & GPI_SMMU_ATOMIC) {
2709 int atomic = 1;
2710
2711 ret = iommu_domain_set_attr(mapping->domain,
2712 DOMAIN_ATTR_ATOMIC, &atomic);
2713 if (ret) {
2714 GPI_ERR(gpi_dev,
2715 "Failed to set attr ATOMIC, ret:%d\n",
2716 ret);
2717 goto release_mapping;
2718 }
2719 }
2720
2721 ret = arm_iommu_attach_device(gpi_dev->dev, mapping);
2722 if (ret) {
2723 GPI_ERR(gpi_dev,
2724 "Failed with iommu_attach, ret:%d\n", ret);
2725 goto release_mapping;
2726 }
2727	}
2728
2729	ret = gpi_dma_mask(gpi_dev);
2730	if (ret) {
2731		GPI_ERR(gpi_dev, "Error setting dma_mask, ret:%d\n", ret);
2732		goto error_set_mask;
2733 }
2734
2735 return ret;
2736
2737error_set_mask:
2738	if (gpi_dev->smmu_cfg)
2739 arm_iommu_detach_device(gpi_dev->dev);
2740release_mapping:
2741 if (mapping)
2742 arm_iommu_release_mapping(mapping);
2743	return ret;
2744}
2745
2746static int gpi_probe(struct platform_device *pdev)
2747{
2748 struct gpi_dev *gpi_dev;
2749 int ret, i;
2750
2751 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2752 if (!gpi_dev)
2753 return -ENOMEM;
2754
2755 gpi_dev->dev = &pdev->dev;
2756 gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
2757 gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2758 "gpi-top");
2759 if (!gpi_dev->res) {
2760 GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
2761 return -EINVAL;
2762 }
2763 gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
2764 resource_size(gpi_dev->res));
2765 if (!gpi_dev->regs) {
2766 GPI_ERR(gpi_dev, "IO remap failed\n");
2767 return -EFAULT;
2768 }
2769
2770 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
2771 &gpi_dev->max_gpii);
2772 if (ret) {
2773 GPI_ERR(gpi_dev, "missing 'max-no-gpii' DT node\n");
2774 return ret;
2775 }
2776
2777 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
2778 &gpi_dev->gpii_mask);
2779 if (ret) {
2780 GPI_ERR(gpi_dev, "missing 'gpii-mask' DT node\n");
2781 return ret;
2782 }
2783
2784 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
2785 &gpi_dev->ev_factor);
2786 if (ret) {
2787 GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
2788 return ret;
2789 }
2790
Sujeev Dias69484212017-08-31 10:06:53 -07002791 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,smmu-cfg",
2792 &gpi_dev->smmu_cfg);
2793 if (ret) {
2794 GPI_ERR(gpi_dev, "missing 'qcom,smmu-cfg' DT node\n");
2795 return ret;
2796 }
2797 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2798 u64 iova_range[2];
2799
2800 ret = of_property_count_elems_of_size(gpi_dev->dev->of_node,
2801 "qcom,iova-range",
2802 sizeof(iova_range));
2803 if (ret != 1) {
2804 GPI_ERR(gpi_dev,
2805 "missing or incorrect 'qcom,iova-range' DT node ret:%d\n",
2806 ret);
2807 }
2808
2809 ret = of_property_read_u64_array(gpi_dev->dev->of_node,
2810 "qcom,iova-range", iova_range,
2811 sizeof(iova_range) / sizeof(u64));
2812 if (ret) {
2813 GPI_ERR(gpi_dev,
2814 "could not read DT prop 'qcom,iova-range\n");
2815 return ret;
2816 }
2817 gpi_dev->iova_base = iova_range[0];
2818 gpi_dev->iova_size = iova_range[1];
2819 }
2820
2821	ret = gpi_smmu_init(gpi_dev);
2822 if (ret) {
2823 GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
2824 return ret;
2825 }
2826
2827 gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
2828 sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
2829 GFP_KERNEL);
2830 if (!gpi_dev->gpiis)
2831 return -ENOMEM;
2832
2833
2834 /* setup all the supported gpii */
2835 INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2836 for (i = 0; i < gpi_dev->max_gpii; i++) {
2837 struct gpii *gpii = &gpi_dev->gpiis[i];
2838 int chan;
2839
2840 if (!((1 << i) & gpi_dev->gpii_mask))
2841 continue;
2842
2843 /* set up ev cntxt register map */
2844 gpii->ev_cntxt_base_reg = gpi_dev->regs +
2845 GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2846 gpii->ev_cntxt_db_reg = gpi_dev->regs +
2847 GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2848 gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
2849 CNTXT_2_RING_BASE_LSB;
2850 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
2851 CNTXT_4_RING_RP_LSB;
2852 gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
2853 CNTXT_6_RING_WP_LSB;
2854 gpii->ev_cmd_reg = gpi_dev->regs +
2855 GPI_GPII_n_EV_CH_CMD_OFFS(i);
2856 gpii->ieob_src_reg = gpi_dev->regs +
2857 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
2858 gpii->ieob_clr_reg = gpi_dev->regs +
2859 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2860
2861 /* set up irq */
2862 ret = platform_get_irq(pdev, i);
2863 if (ret < 0) {
2864 GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d",
2865 i, ret);
2866 return ret;
2867 }
2868 gpii->irq = ret;
2869
2870 /* set up channel specific register info */
2871 for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2872 struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
2873
2874 /* set up ch cntxt register map */
2875 gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
2876 GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2877 gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
2878 GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2879 gpii_chan->ch_ring_base_lsb_reg =
2880 gpii_chan->ch_cntxt_base_reg +
2881 CNTXT_2_RING_BASE_LSB;
2882 gpii_chan->ch_ring_rp_lsb_reg =
2883 gpii_chan->ch_cntxt_base_reg +
2884 CNTXT_4_RING_RP_LSB;
2885 gpii_chan->ch_ring_wp_lsb_reg =
2886 gpii_chan->ch_cntxt_base_reg +
2887 CNTXT_6_RING_WP_LSB;
2888 gpii_chan->ch_cmd_reg = gpi_dev->regs +
2889 GPI_GPII_n_CH_CMD_OFFS(i);
2890
2891 /* vchan setup */
2892 vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
2893 gpii_chan->vc.desc_free = gpi_desc_free;
2894 gpii_chan->chid = chan;
2895 gpii_chan->gpii = gpii;
2896 gpii_chan->dir = GPII_CHAN_DIR[chan];
2897 }
2898 mutex_init(&gpii->ctrl_lock);
2899 rwlock_init(&gpii->pm_lock);
2900 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2901 (unsigned long)gpii);
2902 init_completion(&gpii->cmd_completion);
2903 gpii->gpii_id = i;
2904 gpii->regs = gpi_dev->regs;
2905 gpii->gpi_dev = gpi_dev;
2906 atomic_set(&gpii->dbg_index, 0);
2907 }
2908
2909 platform_set_drvdata(pdev, gpi_dev);
2910
2911	/* clear and set capabilities */
2912 dma_cap_zero(gpi_dev->dma_device.cap_mask);
2913 dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2914
2915 /* configure dmaengine apis */
2916 gpi_dev->dma_device.directions =
2917 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2918 gpi_dev->dma_device.residue_granularity =
2919 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2920 gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2921 gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2922 gpi_dev->dma_device.device_alloc_chan_resources =
2923 gpi_alloc_chan_resources;
2924 gpi_dev->dma_device.device_free_chan_resources =
2925 gpi_free_chan_resources;
2926 gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2927 gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2928 gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2929 gpi_dev->dma_device.device_config = gpi_config;
2930 gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2931 gpi_dev->dma_device.dev = gpi_dev->dev;
2932 gpi_dev->dma_device.device_pause = gpi_pause;
2933 gpi_dev->dma_device.device_resume = gpi_resume;
2934
2935 /* register with dmaengine framework */
2936 ret = dma_async_device_register(&gpi_dev->dma_device);
2937 if (ret) {
2938 GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
2939 return ret;
2940 }
2941
2942 ret = of_dma_controller_register(gpi_dev->dev->of_node,
2943 gpi_of_dma_xlate, gpi_dev);
2944 if (ret) {
2945 GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
2946 return ret;
2947 }
2948
2949 /* setup debug capabilities */
2950 gpi_setup_debug(gpi_dev);
2951 GPI_LOG(gpi_dev, "probe success\n");
2952
2953 return ret;
2954}
2955
2956static const struct of_device_id gpi_of_match[] = {
2957 { .compatible = "qcom,gpi-dma" },
2958 {}
2959};
2960MODULE_DEVICE_TABLE(of, gpi_of_match);
2961
2962static struct platform_driver gpi_driver = {
2963 .probe = gpi_probe,
2964 .driver = {
2965 .name = GPI_DMA_DRV_NAME,
2966 .of_match_table = gpi_of_match,
2967 },
2968};
2969
2970static int __init gpi_init(void)
2971{
2972 pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
2973 return platform_driver_register(&gpi_driver);
2974}
2975module_init(gpi_init)
2976
2977MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2978MODULE_LICENSE("GPL v2");