1/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <asm/dma-iommu.h>
14#include <linux/atomic.h>
15#include <linux/completion.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/io.h>
21#include <linux/iommu.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/ipc_logging.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_address.h>
29#include <linux/of_dma.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/scatterlist.h>
33#include <linux/sched_clock.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <asm/cacheflush.h>
37#include <linux/msm_gpi.h>
38#include "../dmaengine.h"
39#include "../virt-dma.h"
40#include "msm_gpi_mmio.h"
41
42/* global logging macros */
43#define GPI_LOG(gpi_dev, fmt, ...) do { \
44 if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
45 dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
46 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
47 ipc_log_string(gpi_dev->ilctxt, \
48 "%s: " fmt, __func__, ##__VA_ARGS__); \
49 } while (0)
50#define GPI_ERR(gpi_dev, fmt, ...) do { \
51 if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
52 dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
53 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
54 ipc_log_string(gpi_dev->ilctxt, \
55 "%s: " fmt, __func__, ##__VA_ARGS__); \
56 } while (0)
57
58/* gpii specific logging macros */
59#define GPII_REG(gpii, ch, fmt, ...) do { \
60 if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
61 pr_info("%s:%u:%s: " fmt, gpii->label, \
62 ch, __func__, ##__VA_ARGS__); \
63 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
64 ipc_log_string(gpii->ilctxt, \
65 "ch:%u %s: " fmt, ch, \
66 __func__, ##__VA_ARGS__); \
67 } while (0)
68#define GPII_VERB(gpii, ch, fmt, ...) do { \
69 if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
70 pr_info("%s:%u:%s: " fmt, gpii->label, \
71 ch, __func__, ##__VA_ARGS__); \
72 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
73 ipc_log_string(gpii->ilctxt, \
74 "ch:%u %s: " fmt, ch, \
75 __func__, ##__VA_ARGS__); \
76 } while (0)
77#define GPII_INFO(gpii, ch, fmt, ...) do { \
78 if (gpii->klog_lvl >= LOG_LVL_INFO) \
79 pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
80 __func__, ##__VA_ARGS__); \
81 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
82 ipc_log_string(gpii->ilctxt, \
83 "ch:%u %s: " fmt, ch, \
84 __func__, ##__VA_ARGS__); \
85 } while (0)
86#define GPII_ERR(gpii, ch, fmt, ...) do { \
87 if (gpii->klog_lvl >= LOG_LVL_ERROR) \
88 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
89 __func__, ##__VA_ARGS__); \
90 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
91 ipc_log_string(gpii->ilctxt, \
92 "ch:%u %s: " fmt, ch, \
93 __func__, ##__VA_ARGS__); \
94 } while (0)
95#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
96 if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
97 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
98 __func__, ##__VA_ARGS__); \
99 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
100 ipc_log_string(gpii->ilctxt, \
101 "ch:%u %s: " fmt, ch, \
102 __func__, ##__VA_ARGS__); \
103 } while (0)
104
105enum DEBUG_LOG_LVL {
106 LOG_LVL_MASK_ALL,
107 LOG_LVL_CRITICAL,
108 LOG_LVL_ERROR,
109 LOG_LVL_INFO,
110 LOG_LVL_VERBOSE,
111 LOG_LVL_REG_ACCESS,
112};
113
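/* whether events are processed directly in the ISR or deferred to the tasklet */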
114enum EV_PRIORITY {
115 EV_PRIORITY_ISR,
116 EV_PRIORITY_TASKLET,
117};
118
119#define GPI_DMA_DRV_NAME "gpi_dma"
120#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
121#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
122#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
123#define IPC_LOG_PAGES (40)
124#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
125#define CMD_TIMEOUT_MS (1000)
126#else
127#define IPC_LOG_PAGES (2)
128#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
129#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
130#define CMD_TIMEOUT_MS (250)
131#endif
132
133#define GPI_LABEL_SIZE (256)
134#define GPI_DBG_COMMON (99)
135#define MAX_CHANNELS_PER_GPII (2)
136#define GPI_TX_CHAN (0)
137#define GPI_RX_CHAN (1)
138#define STATE_IGNORE (U32_MAX)
139#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
140
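/*
 * The packed structures below follow hardware-defined layouts: the 32-bit
 * error log register contents and the 16-byte event ring elements written by
 * the hardware for transfer-complete, immediate-data and QUP notification
 * events.
 */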
141struct __packed gpi_error_log_entry {
142 u32 routine : 4;
143 u32 type : 4;
144 u32 reserved0 : 4;
145 u32 code : 4;
146 u32 reserved1 : 3;
147 u32 chid : 5;
148 u32 reserved2 : 1;
149 u32 chtype : 1;
150 u32 ee : 1;
151};
152
153struct __packed xfer_compl_event {
154 u64 ptr;
155 u32 length : 24;
156 u8 code;
157 u16 status;
158 u8 type;
159 u8 chid;
160};
161
162struct __packed immediate_data_event {
163 u8 data_bytes[8];
164 u8 length : 4;
165 u8 resvd : 4;
166 u16 tre_index;
167 u8 code;
168 u16 status;
169 u8 type;
170 u8 chid;
171};
172
173struct __packed qup_notif_event {
174 u32 status;
175 u32 time;
176 u32 count :24;
177 u8 resvd;
178 u16 resvd1;
179 u8 type;
180 u8 chid;
181};
182
183struct __packed gpi_ere {
184 u32 dword[4];
185};
186
187enum GPI_EV_TYPE {
188 XFER_COMPLETE_EV_TYPE = 0x22,
189 IMMEDIATE_DATA_EV_TYPE = 0x30,
190 QUP_NOTIF_EV_TYPE = 0x31,
191 STALE_EV_TYPE = 0xFF,
192};
193
194union __packed gpi_event {
195 struct __packed xfer_compl_event xfer_compl_event;
196 struct __packed immediate_data_event immediate_data_event;
197 struct __packed qup_notif_event qup_notif_event;
198 struct __packed gpi_ere gpi_ere;
199};
200
201enum gpii_irq_settings {
202 DEFAULT_IRQ_SETTINGS,
203 MASK_IEOB_SETTINGS,
204};
205
206enum gpi_ev_state {
207 DEFAULT_EV_CH_STATE = 0,
208 EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
209 EV_STATE_ALLOCATED,
210 MAX_EV_STATES
211};
212
213static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
214 [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
215 [EV_STATE_ALLOCATED] = "ALLOCATED",
216};
217
218#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
219 "INVALID" : gpi_ev_state_str[state])
220
221enum gpi_ch_state {
222 DEFAULT_CH_STATE = 0x0,
223 CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
224 CH_STATE_ALLOCATED = 0x1,
225 CH_STATE_STARTED = 0x2,
226 CH_STATE_STOPPED = 0x3,
227 CH_STATE_STOP_IN_PROC = 0x4,
228 CH_STATE_ERROR = 0xf,
229 MAX_CH_STATES
230};
231
232static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
233 [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
234 [CH_STATE_ALLOCATED] = "ALLOCATED",
235 [CH_STATE_STARTED] = "STARTED",
236 [CH_STATE_STOPPED] = "STOPPED",
237 [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
238 [CH_STATE_ERROR] = "ERROR",
239};
240
241#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
242 "INVALID" : gpi_ch_state_str[state])
243
244enum gpi_cmd {
245 GPI_CH_CMD_BEGIN,
246 GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
247 GPI_CH_CMD_START,
248 GPI_CH_CMD_STOP,
249 GPI_CH_CMD_RESET,
250 GPI_CH_CMD_DE_ALLOC,
251 GPI_CH_CMD_UART_SW_STALE,
252 GPI_CH_CMD_UART_RFR_READY,
253 GPI_CH_CMD_UART_RFR_NOT_READY,
254 GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
255 GPI_EV_CMD_BEGIN,
256 GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
257 GPI_EV_CMD_RESET,
258 GPI_EV_CMD_DEALLOC,
259 GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
260 GPI_MAX_CMD,
261};
262
263#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
264
265static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
266 [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
267 [GPI_CH_CMD_START] = "CH START",
268 [GPI_CH_CMD_STOP] = "CH STOP",
269 [GPI_CH_CMD_RESET] = "CH_RESET",
270 [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
271 [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
272 [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
273 [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
274 [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
275 [GPI_EV_CMD_RESET] = "EV RESET",
276 [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
277};
278
279#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
280 gpi_cmd_str[cmd])
281
282static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
283 [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
284 [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
285 [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
286 [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
287 [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
288 [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
289};
290
291#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
292 "INVALID" : gpi_cb_event_str[event])
293
294enum se_protocol {
295 SE_PROTOCOL_SPI = 1,
296 SE_PROTOCOL_UART = 2,
297 SE_PROTOCOL_I2C = 3,
298 SE_MAX_PROTOCOL
299};
300
301/*
302 * @DISABLE_STATE: no register access allowed
303 * @CONFIG_STATE: client has configured the channel
304 * @PREPARE_HARDWARE: register access is allowed,
305 * however, events are not processed
306 * @ACTIVE_STATE: channels are fully operational
307 * @PREPARE_TERMINATE: graceful termination of channels
308 * register access is allowed
309 * @PAUSE_STATE: channels are active, but not processing any events
310 */
311enum gpi_pm_state {
312 DISABLE_STATE,
313 CONFIG_STATE,
314 PREPARE_HARDWARE,
315 ACTIVE_STATE,
316 PREPARE_TERMINATE,
317 PAUSE_STATE,
318 MAX_PM_STATE
319};
320
321#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
322
323static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
324 [DISABLE_STATE] = "DISABLE",
325 [CONFIG_STATE] = "CONFIG",
326 [PREPARE_HARDWARE] = "PREPARE HARDWARE",
327 [ACTIVE_STATE] = "ACTIVE",
328 [PREPARE_TERMINATE] = "PREPARE TERMINATE",
329 [PAUSE_STATE] = "PAUSE",
330};
331
332#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
333 "INVALID" : gpi_pm_state_str[state])
334
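/*
 * Per-command table: the opcode to program, the channel/event ring state
 * expected once the command completes (STATE_IGNORE if the command does not
 * change state), and the completion timeout.
 */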
335static const struct {
336 enum gpi_cmd gpi_cmd;
337 u32 opcode;
338 u32 state;
339 u32 timeout_ms;
340} gpi_cmd_info[GPI_MAX_CMD] = {
341 {
342 GPI_CH_CMD_ALLOCATE,
343 GPI_GPII_n_CH_CMD_ALLOCATE,
344 CH_STATE_ALLOCATED,
345 CMD_TIMEOUT_MS,
346 },
347 {
348 GPI_CH_CMD_START,
349 GPI_GPII_n_CH_CMD_START,
350 CH_STATE_STARTED,
351 CMD_TIMEOUT_MS,
352 },
353 {
354 GPI_CH_CMD_STOP,
355 GPI_GPII_n_CH_CMD_STOP,
356 CH_STATE_STOPPED,
357 CMD_TIMEOUT_MS,
358 },
359 {
360 GPI_CH_CMD_RESET,
361 GPI_GPII_n_CH_CMD_RESET,
362 CH_STATE_ALLOCATED,
363 CMD_TIMEOUT_MS,
364 },
365 {
366 GPI_CH_CMD_DE_ALLOC,
367 GPI_GPII_n_CH_CMD_DE_ALLOC,
368 CH_STATE_NOT_ALLOCATED,
369 CMD_TIMEOUT_MS,
370 },
371 {
372 GPI_CH_CMD_UART_SW_STALE,
373 GPI_GPII_n_CH_CMD_UART_SW_STALE,
374 STATE_IGNORE,
375 CMD_TIMEOUT_MS,
376 },
377 {
378 GPI_CH_CMD_UART_RFR_READY,
379 GPI_GPII_n_CH_CMD_UART_RFR_READY,
380 STATE_IGNORE,
381 CMD_TIMEOUT_MS,
382 },
383 {
384 GPI_CH_CMD_UART_RFR_NOT_READY,
385 GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
386 STATE_IGNORE,
387 CMD_TIMEOUT_MS,
388 },
389 {
390 GPI_EV_CMD_ALLOCATE,
391 GPI_GPII_n_EV_CH_CMD_ALLOCATE,
392 EV_STATE_ALLOCATED,
393 CMD_TIMEOUT_MS,
394 },
395 {
396 GPI_EV_CMD_RESET,
397 GPI_GPII_n_EV_CH_CMD_RESET,
398 EV_STATE_ALLOCATED,
399 CMD_TIMEOUT_MS,
400 },
401 {
402 GPI_EV_CMD_DEALLOC,
403 GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
404 EV_STATE_NOT_ALLOCATED,
405 CMD_TIMEOUT_MS,
406 },
407};
408
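/*
 * Transfer/event ring bookkeeping: pre_aligned/dma_handle describe the raw
 * coherent allocation, base/phys_addr the length-aligned region actually used
 * as the ring, and rp/wp the local read/write pointers into it.
 */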
409struct gpi_ring {
410 void *pre_aligned;
411 size_t alloc_size;
412 phys_addr_t phys_addr;
413 dma_addr_t dma_handle;
414 void *base;
415 void *wp;
416 void *rp;
417 u32 len;
418 u32 el_size;
419 u32 elements;
420 bool configured;
421};
422
423struct sg_tre {
424 void *ptr;
425 void *wp; /* store chan wp for debugging */
426};
427
428struct gpi_dbg_log {
429 void *addr;
430 u64 time;
431 u32 val;
432 bool read;
433};
434
435struct gpi_dev {
436 struct dma_device dma_device;
437 struct device *dev;
438 struct resource *res;
439 void __iomem *regs;
440 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
441 u32 gpii_mask; /* gpii instances available for apps */
442 u32 ev_factor; /* ev ring length factor */
443 u32 smmu_cfg;
444 dma_addr_t iova_base;
445 size_t iova_size;
446 struct gpii *gpiis;
447 void *ilctxt;
448 u32 ipc_log_lvl;
449 u32 klog_lvl;
450 struct dentry *dentry;
451};
452
453struct gpii_chan {
454 struct virt_dma_chan vc;
455 u32 chid;
456 u32 seid;
457 enum se_protocol protocol;
458 enum EV_PRIORITY priority; /* comes from the client's DT node */
459 struct gpii *gpii;
460 enum gpi_ch_state ch_state;
461 enum gpi_pm_state pm_state;
462 void __iomem *ch_cntxt_base_reg;
463 void __iomem *ch_cntxt_db_reg;
464 void __iomem *ch_ring_base_lsb_reg,
465 *ch_ring_rp_lsb_reg,
466 *ch_ring_wp_lsb_reg;
467 void __iomem *ch_cmd_reg;
468 u32 req_tres; /* # of tre's client requested */
469 u32 dir;
470 struct gpi_ring ch_ring;
471 struct gpi_client_info client_info;
472};
473
474struct gpii {
475 u32 gpii_id;
476 struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
477 struct gpi_dev *gpi_dev;
478 enum EV_PRIORITY ev_priority;
479 enum se_protocol protocol;
480 int irq;
481 void __iomem *regs; /* points to gpi top */
482 void __iomem *ev_cntxt_base_reg;
483 void __iomem *ev_cntxt_db_reg;
484 void __iomem *ev_ring_base_lsb_reg,
485 *ev_ring_rp_lsb_reg,
486 *ev_ring_wp_lsb_reg;
487 void __iomem *ev_cmd_reg;
488 void __iomem *ieob_src_reg;
489 void __iomem *ieob_clr_reg;
490 struct mutex ctrl_lock;
491 enum gpi_ev_state ev_state;
492 bool configured_irq;
493 enum gpi_pm_state pm_state;
494 rwlock_t pm_lock;
495 struct gpi_ring ev_ring;
496 struct tasklet_struct ev_task; /* event processing tasklet */
497 struct completion cmd_completion;
498 enum gpi_cmd gpi_cmd;
499 u32 cntxt_type_irq_msk;
500 void *ilctxt;
501 u32 ipc_log_lvl;
502 u32 klog_lvl;
503 struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
504 atomic_t dbg_index;
505 char label[GPI_LABEL_SIZE];
506 struct dentry *dentry;
507};
508
509struct gpi_desc {
510 struct virt_dma_desc vd;
511 void *wp; /* points to TRE last queued during issue_pending */
512 void *db; /* DB register to program */
513 struct gpii_chan *gpii_chan;
514};
515
516#define GPI_SMMU_ATTACH BIT(0)
517#define GPI_SMMU_S1_BYPASS BIT(1)
518#define GPI_SMMU_FAST BIT(2)
519#define GPI_SMMU_ATOMIC BIT(3)
520
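/* chid 0 (GPI_TX_CHAN) is the outbound channel, chid 1 (GPI_RX_CHAN) the inbound one */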
521const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
522 GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
523};
524
525struct dentry *pdentry;
526static irqreturn_t gpi_handle_irq(int irq, void *data);
527static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
528static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
529static void gpi_process_events(struct gpii *gpii);
530
531static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
532{
533 return container_of(dma_chan, struct gpii_chan, vc.chan);
534}
535
536static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
537{
538 return container_of(vd, struct gpi_desc, vd);
539}
540
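/*
 * Translate between a ring element's CPU virtual address and its physical
 * address within the ring's contiguous allocation.
 */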
541static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
542 void *addr)
543{
544 return ring->phys_addr + (addr - ring->base);
545}
546
547static inline void *to_virtual(const struct gpi_ring *const ring,
548 phys_addr_t addr)
549{
550 return ring->base + (addr - ring->phys_addr);
551}
552
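/*
 * With CONFIG_QCOM_GPI_DMA_DEBUG, every register access is also recorded in
 * the per-gpii dbg_log circular buffer (GPI_DBG_LOG_SIZE entries).
 */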
553#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
554static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
555{
556 u64 time = sched_clock();
557 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
558 u32 val;
559
560 val = readl_relaxed(addr);
561 index &= (GPI_DBG_LOG_SIZE - 1);
562 (gpii->dbg_log + index)->addr = addr;
563 (gpii->dbg_log + index)->time = time;
564 (gpii->dbg_log + index)->val = val;
565 (gpii->dbg_log + index)->read = true;
566 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
567 addr - gpii->regs, val);
568 return val;
569}
570static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
571{
572 u64 time = sched_clock();
573 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
574
575 index &= (GPI_DBG_LOG_SIZE - 1);
576 (gpii->dbg_log + index)->addr = addr;
577 (gpii->dbg_log + index)->time = time;
578 (gpii->dbg_log + index)->val = val;
579 (gpii->dbg_log + index)->read = false;
580
581 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
582 addr - gpii->regs, val);
583 writel_relaxed(val, addr);
584}
585#else
586static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
587{
588 u32 val = readl_relaxed(addr);
589
590 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
591 addr - gpii->regs, val);
592 return val;
593}
594static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
595{
596 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
597 addr - gpii->regs, val);
598 writel_relaxed(val, addr);
599}
600#endif
601
602/* gpi_write_reg_field - write to specific bit field */
603static inline void gpi_write_reg_field(struct gpii *gpii,
604 void __iomem *addr,
605 u32 mask,
606 u32 shift,
607 u32 val)
608{
609 u32 tmp = gpi_read_reg(gpii, addr);
610
611 tmp &= ~mask;
612 val = tmp | ((val << shift) & mask);
613 gpi_write_reg(gpii, addr, val);
614}
615
616static void gpi_disable_interrupts(struct gpii *gpii)
617{
618 struct {
619 u32 offset;
620 u32 mask;
621 u32 shift;
622 u32 val;
623 } default_reg[] = {
624 {
625 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
626 (gpii->gpii_id),
627 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
628 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
629 0,
630 },
631 {
632 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
633 (gpii->gpii_id),
634 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
635 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
636 0,
637 },
638 {
639 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
640 (gpii->gpii_id),
641 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
642 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
643 0,
644 },
645 {
646 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
647 (gpii->gpii_id),
648 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
649 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
650 0,
651 },
652 {
653 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
654 (gpii->gpii_id),
655 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
656 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
657 0,
658 },
659 {
660 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
661 (gpii->gpii_id),
662 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
663 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
664 0,
665 },
666 {
667 GPI_GPII_n_CNTXT_INTSET_OFFS
668 (gpii->gpii_id),
669 GPI_GPII_n_CNTXT_INTSET_BMSK,
670 GPI_GPII_n_CNTXT_INTSET_SHFT,
671 0,
672 },
673 { 0 },
674 };
675 int i;
676
677 for (i = 0; default_reg[i].offset; i++)
678 gpi_write_reg_field(gpii, gpii->regs +
679 default_reg[i].offset,
680 default_reg[i].mask,
681 default_reg[i].shift,
682 default_reg[i].val);
683 gpii->cntxt_type_irq_msk = 0;
684 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
685 gpii->configured_irq = false;
686}
687
688/* configure and enable interrupts */
689static int gpi_config_interrupts(struct gpii *gpii,
690 enum gpii_irq_settings settings,
691 bool mask)
692{
693 int ret;
694 int i;
695 const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
696 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
697 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
698 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
699 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
700 struct {
701 u32 offset;
702 u32 mask;
703 u32 shift;
704 u32 val;
705 } default_reg[] = {
706 {
707 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
708 (gpii->gpii_id),
709 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
710 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
711 def_type,
712 },
713 {
714 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
715 (gpii->gpii_id),
716 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
717 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
718 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
719 },
720 {
721 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
722 (gpii->gpii_id),
723 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
724 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
725 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
726 },
727 {
728 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
729 (gpii->gpii_id),
730 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
731 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
732 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
733 },
734 {
735 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
736 (gpii->gpii_id),
737 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
738 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
739 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
740 },
741 {
742 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
743 (gpii->gpii_id),
744 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
745 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
746 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
747 },
748 {
749 GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
750 (gpii->gpii_id),
751 U32_MAX,
752 0,
753 0x0,
754 },
755 {
756 GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
757 (gpii->gpii_id),
758 U32_MAX,
759 0,
760 0x0,
761 },
762 {
763 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
764 (gpii->gpii_id),
765 U32_MAX,
766 0,
767 0x0,
768 },
769 {
770 GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
771 (gpii->gpii_id),
772 U32_MAX,
773 0,
774 0x0,
775 },
776 {
777 GPI_GPII_n_CNTXT_INTSET_OFFS
778 (gpii->gpii_id),
779 GPI_GPII_n_CNTXT_INTSET_BMSK,
780 GPI_GPII_n_CNTXT_INTSET_SHFT,
781 0x01,
782 },
783 {
784 GPI_GPII_n_ERROR_LOG_OFFS
785 (gpii->gpii_id),
786 U32_MAX,
787 0,
788 0x00,
789 },
790 { 0 },
791 };
792
793 GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
794 (gpii->configured_irq) ? 'F' : 'T',
795 (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
796 (mask) ? 'T' : 'F');
797
798 if (gpii->configured_irq == false) {
799 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
800 gpi_handle_irq, IRQF_TRIGGER_HIGH,
801 gpii->label, gpii);
802 if (ret < 0) {
803 GPII_CRITIC(gpii, GPI_DBG_COMMON,
804 "error request irq:%d ret:%d\n",
805 gpii->irq, ret);
806 return ret;
807 }
808 }
809
810 if (settings == MASK_IEOB_SETTINGS) {
811 /*
812 * GPII only uses one EV ring per gpii so we can globally
813 * enable/disable IEOB interrupt
814 */
815 if (mask)
816 gpii->cntxt_type_irq_msk |=
817 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
818 else
819 gpii->cntxt_type_irq_msk &=
820 ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
821 gpi_write_reg_field(gpii, gpii->regs +
822 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
823 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
824 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
825 gpii->cntxt_type_irq_msk);
826 } else {
827 for (i = 0; default_reg[i].offset; i++)
828 gpi_write_reg_field(gpii, gpii->regs +
829 default_reg[i].offset,
830 default_reg[i].mask,
831 default_reg[i].shift,
832 default_reg[i].val);
833 gpii->cntxt_type_irq_msk = def_type;
834 }
835
836 gpii->configured_irq = true;
837
838 return 0;
839}
840
841/* Sends gpii event or channel command */
842static int gpi_send_cmd(struct gpii *gpii,
843 struct gpii_chan *gpii_chan,
844 enum gpi_cmd gpi_cmd)
845{
846 u32 chid = MAX_CHANNELS_PER_GPII;
847 u32 cmd;
848 unsigned long timeout;
849 void __iomem *cmd_reg;
850
851 if (gpi_cmd >= GPI_MAX_CMD)
852 return -EINVAL;
853 if (IS_CHAN_CMD(gpi_cmd))
854 chid = gpii_chan->chid;
855
856 GPII_INFO(gpii, chid,
857 "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
858
859 /* send opcode and wait for completion */
860 reinit_completion(&gpii->cmd_completion);
861 gpii->gpi_cmd = gpi_cmd;
862
863 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
864 gpii->ev_cmd_reg;
865 cmd = IS_CHAN_CMD(gpi_cmd) ?
866 GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
867 GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
868 gpi_write_reg(gpii, cmd_reg, cmd);
869 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
870 msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
871
872 if (!timeout) {
873 GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
874 TO_GPI_CMD_STR(gpi_cmd));
875 return -EIO;
876 }
877
878 /* if the cmd is a state-change cmd, confirm the new ch state is correct */
879 if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
880 return 0;
881 if (IS_CHAN_CMD(gpi_cmd) &&
882 gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
883 return 0;
884 if (!IS_CHAN_CMD(gpi_cmd) &&
885 gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
886 return 0;
887
888 return -EIO;
889}
890
891/* program transfer ring DB register */
892static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
893 struct gpi_ring *ring,
894 void *wp)
895{
896 struct gpii *gpii = gpii_chan->gpii;
897 phys_addr_t p_wp;
898
899 p_wp = to_physical(ring, wp);
900 gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
901}
902
903/* program event ring DB register */
904static inline void gpi_write_ev_db(struct gpii *gpii,
905 struct gpi_ring *ring,
906 void *wp)
907{
908 phys_addr_t p_wp;
909
910 p_wp = ring->phys_addr + (wp - ring->base);
911 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
912}
913
914/* notify client with generic event */
915static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
916 enum msm_gpi_cb_event event,
917 u64 status)
918{
919 struct gpii *gpii = gpii_chan->gpii;
920 struct gpi_client_info *client_info = &gpii_chan->client_info;
921 struct msm_gpi_cb msm_gpi_cb = {0};
922
923 GPII_ERR(gpii, gpii_chan->chid,
924 "notifying event:%s with status:%llu\n",
925 TO_GPI_CB_EVENT_STR(event), status);
926
927 msm_gpi_cb.cb_event = event;
928 msm_gpi_cb.status = status;
929 msm_gpi_cb.timestamp = sched_clock();
930 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
931 client_info->cb_param);
932}
933
934/* process transfer completion interrupt */
935static void gpi_process_ieob(struct gpii *gpii)
936{
937 u32 ieob_irq;
938
939 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
940 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
941 GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
942
943 /* process events based on priority */
944 if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
945 GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
946 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
947 tasklet_schedule(&gpii->ev_task);
948 } else {
949 GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
950 gpi_process_events(gpii);
951 }
952}
953
954/* process channel control interrupt */
955static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
956{
957 u32 gpii_id = gpii->gpii_id;
958 u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
959 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
960 u32 chid;
961 struct gpii_chan *gpii_chan;
962 u32 state;
963
964 /* clear the status */
965 offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
966 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
967
968 for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
969 if (!(BIT(chid) & ch_irq))
970 continue;
971
972 gpii_chan = &gpii->gpii_chan[chid];
973 GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
974 state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
975 CNTXT_0_CONFIG);
976 state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
977 GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
978
979 /*
980 * The CH_CMD_DE_ALLOC cmd is always successful. However, the cmd
981 * does not change the hardware state, so overwrite the software
982 * state with the default state.
983 */
984 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
985 state = DEFAULT_CH_STATE;
986 gpii_chan->ch_state = state;
987 GPII_VERB(gpii, chid, "setting channel to state:%s\n",
988 TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
989
990 /*
991 * Trigger complete_all() only if ch_state is not stop-in-process.
992 * Stop-in-process is a transitional state; wait for the stop
993 * interrupt before notifying.
994 */
995 if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
996 complete_all(&gpii->cmd_completion);
997
998 /* notifying clients if in error state */
999 if (gpii_chan->ch_state == CH_STATE_ERROR)
1000 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
1001 __LINE__);
1002 }
1003}
1004
1005/* processing gpi level error interrupts */
1006static void gpi_process_glob_err_irq(struct gpii *gpii)
1007{
1008 u32 gpii_id = gpii->gpii_id;
1009 u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
1010 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
1011 u32 error_log;
1012 u32 chid;
1013 struct gpii_chan *gpii_chan;
1014 struct gpi_client_info *client_info;
1015 struct msm_gpi_cb msm_gpi_cb;
1016 struct gpi_error_log_entry *log_entry =
1017 (struct gpi_error_log_entry *)&error_log;
1018
1019 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
1020 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
1021
1022 /* only error interrupt should be set */
1023 if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
1024 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
1025 irq_stts);
1026 goto error_irq;
1027 }
1028
1029 offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
1030 error_log = gpi_read_reg(gpii, gpii->regs + offset);
1031 gpi_write_reg(gpii, gpii->regs + offset, 0);
1032
1033 /* get channel info */
1034 chid = ((struct gpi_error_log_entry *)&error_log)->chid;
1035 if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
1036 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
1037 chid);
1038 goto error_irq;
1039 }
1040
1041 gpii_chan = &gpii->gpii_chan[chid];
1042 client_info = &gpii_chan->client_info;
1043
1044 /* notify client with error log */
1045 msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
1046 msm_gpi_cb.error_log.routine = log_entry->routine;
1047 msm_gpi_cb.error_log.type = log_entry->type;
1048 msm_gpi_cb.error_log.error_code = log_entry->code;
1049 GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
1050 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1051 GPII_ERR(gpii, gpii_chan->chid,
1052 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
1053 log_entry->ee, log_entry->chtype,
1054 msm_gpi_cb.error_log.routine,
1055 msm_gpi_cb.error_log.type,
1056 msm_gpi_cb.error_log.error_code);
1057 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1058 client_info->cb_param);
1059
1060 return;
1061
1062error_irq:
1063 for (chid = 0, gpii_chan = gpii->gpii_chan;
1064 chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
1065 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
1066 irq_stts);
1067}
1068
1069/* gpii interrupt handler */
1070static irqreturn_t gpi_handle_irq(int irq, void *data)
1071{
1072 struct gpii *gpii = data;
1073 u32 type;
1074 unsigned long flags;
1075 u32 offset;
1076 u32 gpii_id = gpii->gpii_id;
1077
1078 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1079
1080 read_lock_irqsave(&gpii->pm_lock, flags);
1081
1082 /*
1083 * States are out of sync if an interrupt arrives while the
1084 * software state is still DISABLED; bail out.
1085 */
1086 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1087 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1088 "receive interrupt while in %s state\n",
1089 TO_GPI_PM_STR(gpii->pm_state));
1090 goto exit_irq;
1091 }
1092
1093 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1094 type = gpi_read_reg(gpii, gpii->regs + offset);
1095
1096 do {
1097 GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
1098 type);
1099 /* global gpii error */
1100 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
1101 GPII_ERR(gpii, GPI_DBG_COMMON,
1102 "processing global error irq\n");
1103 gpi_process_glob_err_irq(gpii);
1104 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
1105 }
1106
1107 /* event control irq */
1108 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
1109 u32 ev_state;
1110 u32 ev_ch_irq;
1111
1112 GPII_INFO(gpii, GPI_DBG_COMMON,
1113 "processing EV CTRL interrupt\n");
1114 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
1115 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
1116
1117 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
1118 (gpii_id);
1119 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
1120 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
1121 CNTXT_0_CONFIG);
1122 ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
1123 ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
1124
1125 /*
1126 * The EV_CMD_DEALLOC cmd is always successful. However, the
1127 * cmd does not change the hardware state, so overwrite the
1128 * software state with the default state.
1129 */
1130 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
1131 ev_state = DEFAULT_EV_CH_STATE;
1132
1133 gpii->ev_state = ev_state;
1134 GPII_INFO(gpii, GPI_DBG_COMMON,
1135 "setting EV state to %s\n",
1136 TO_GPI_EV_STATE_STR(gpii->ev_state));
1137 complete_all(&gpii->cmd_completion);
1138 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
1139 }
1140
1141 /* channel control irq */
1142 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
1143 GPII_INFO(gpii, GPI_DBG_COMMON,
1144 "process CH CTRL interrupts\n");
1145 gpi_process_ch_ctrl_irq(gpii);
1146 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
1147 }
1148
1149 /* transfer complete interrupt */
1150 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
1151 GPII_VERB(gpii, GPI_DBG_COMMON,
1152 "process IEOB interrupts\n");
1153 gpi_process_ieob(gpii);
1154 type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
1155 }
1156
1157 if (type) {
1158 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1159 "Unhandled interrupt status:0x%x\n", type);
1160 goto exit_irq;
1161 }
1162 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1163 type = gpi_read_reg(gpii, gpii->regs + offset);
1164 } while (type);
1165
1166exit_irq:
1167 read_unlock_irqrestore(&gpii->pm_lock, flags);
1168 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1169
1170 return IRQ_HANDLED;
1171}
1172
1173/* process qup notification events */
1174static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
1175 struct qup_notif_event *notif_event)
1176{
1177 struct gpii *gpii = gpii_chan->gpii;
1178 struct gpi_client_info *client_info = &gpii_chan->client_info;
1179 struct msm_gpi_cb msm_gpi_cb;
1180
1181 GPII_VERB(gpii, gpii_chan->chid,
1182 "status:0x%x time:0x%x count:0x%x\n",
1183 notif_event->status, notif_event->time, notif_event->count);
1184
1185 msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
1186 msm_gpi_cb.status = notif_event->status;
1187 msm_gpi_cb.timestamp = notif_event->time;
1188 msm_gpi_cb.count = notif_event->count;
1189 GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
1190 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1191 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1192 client_info->cb_param);
1193}
1194
1195/* process DMA Immediate completion data events */
1196static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
1197 struct immediate_data_event *imed_event)
1198{
1199 struct gpii *gpii = gpii_chan->gpii;
1200 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1201 struct virt_dma_desc *vd;
1202 struct gpi_desc *gpi_desc;
1203 void *tre = ch_ring->base +
1204 (ch_ring->el_size * imed_event->tre_index);
1205 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1206 unsigned long flags;
1207
1208 /*
1209 * If the channel is not active, don't process the event; just
1210 * let the client know that a pending event is available
1211 */
1212 if (gpii_chan->pm_state != ACTIVE_STATE) {
1213 GPII_ERR(gpii, gpii_chan->chid,
1214 "skipping processing event because ch @ %s state\n",
1215 TO_GPI_PM_STR(gpii_chan->pm_state));
1216 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1217 __LINE__);
1218 return;
1219 }
1220
1221 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1222 vd = vchan_next_desc(&gpii_chan->vc);
1223 if (!vd) {
1224 struct gpi_ere *gpi_ere;
1225 struct msm_gpi_tre *gpi_tre;
1226
1227 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1228 GPII_ERR(gpii, gpii_chan->chid,
1229 "event without a pending descriptor!\n");
1230 gpi_ere = (struct gpi_ere *)imed_event;
1231 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1232 gpi_ere->dword[0], gpi_ere->dword[1],
1233 gpi_ere->dword[2], gpi_ere->dword[3]);
1234 gpi_tre = tre;
1235 GPII_ERR(gpii, gpii_chan->chid,
1236 "Pending TRE: %08x %08x %08x %08x\n",
1237 gpi_tre->dword[0], gpi_tre->dword[1],
1238 gpi_tre->dword[2], gpi_tre->dword[3]);
1239 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1240 __LINE__);
1241 return;
1242 }
1243 gpi_desc = to_gpi_desc(vd);
1244
1245 /* the TRE the event points to does not match the descriptor's TRE */
1246 if (gpi_desc->wp != tre) {
1247 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1248 GPII_ERR(gpii, gpii_chan->chid,
1249 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1250 to_physical(ch_ring, gpi_desc->wp),
1251 to_physical(ch_ring, tre));
1252 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1253 __LINE__);
1254 return;
1255 }
1256
1257 list_del(&vd->node);
1258 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1259
1260
1261 /*
1262 * The RP in the event points to the last TRE processed, so
1263 * update the ring rp to tre + 1
1264 */
1265 tre += ch_ring->el_size;
1266 if (tre >= (ch_ring->base + ch_ring->len))
1267 tre = ch_ring->base;
1268 ch_ring->rp = tre;
1269
1270 /* make sure rp updates are immediately visible to all cores */
1271 smp_wmb();
1272
1273 tx_cb_param = vd->tx.callback_param;
1274 if (tx_cb_param) {
1275 struct msm_gpi_tre *imed_tre = &tx_cb_param->imed_tre;
1276
1277 GPII_VERB(gpii, gpii_chan->chid,
1278 "cb_length:%u compl_code:0x%x status:0x%x\n",
1279 imed_event->length, imed_event->code,
1280 imed_event->status);
1281 /* update immediate data from the event, if any */
1282 *imed_tre = *((struct msm_gpi_tre *)imed_event);
1283 tx_cb_param->length = imed_event->length;
1284 tx_cb_param->completion_code = imed_event->code;
1285 tx_cb_param->status = imed_event->status;
1286 }
1287
1288 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1289 vchan_cookie_complete(vd);
1290 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1291}
1292
1293/* processing transfer completion events */
1294static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
1295 struct xfer_compl_event *compl_event)
1296{
1297 struct gpii *gpii = gpii_chan->gpii;
1298 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1299 void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
1300 struct virt_dma_desc *vd;
1301 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1302 struct gpi_desc *gpi_desc;
1303 unsigned long flags;
1304
1305 /* only process events on active channel */
1306 if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
1307 GPII_ERR(gpii, gpii_chan->chid,
1308 "skipping processing event because ch @ %s state\n",
1309 TO_GPI_PM_STR(gpii_chan->pm_state));
1310 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1311 __LINE__);
1312 return;
1313 }
1314
1315 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1316 vd = vchan_next_desc(&gpii_chan->vc);
1317 if (!vd) {
1318 struct gpi_ere *gpi_ere;
1319
1320 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1321 GPII_ERR(gpii, gpii_chan->chid,
1322 "Event without a pending descriptor!\n");
1323 gpi_ere = (struct gpi_ere *)compl_event;
1324 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1325 gpi_ere->dword[0], gpi_ere->dword[1],
1326 gpi_ere->dword[2], gpi_ere->dword[3]);
1327 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1328 __LINE__);
1329 return;
1330 }
1331
1332 gpi_desc = to_gpi_desc(vd);
1333
1334 /* the TRE the event was generated for does not match the descriptor's TRE */
1335 if (gpi_desc->wp != ev_rp) {
1336 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1337 GPII_ERR(gpii, gpii_chan->chid,
1338 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1339 to_physical(ch_ring, gpi_desc->wp),
1340 to_physical(ch_ring, ev_rp));
1341 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1342 __LINE__);
1343 return;
1344 }
1345
1346 list_del(&vd->node);
1347 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1348
1349
1350 /*
1351 * The RP in the event points to the last TRE processed, so
1352 * update the ring rp to ev_rp + 1
1353 */
1354 ev_rp += ch_ring->el_size;
1355 if (ev_rp >= (ch_ring->base + ch_ring->len))
1356 ev_rp = ch_ring->base;
1357 ch_ring->rp = ev_rp;
1358
1359 /* update must be visible to other cores */
1360 smp_wmb();
1361
1362 tx_cb_param = vd->tx.callback_param;
1363 if (tx_cb_param) {
1364 GPII_VERB(gpii, gpii_chan->chid,
1365 "cb_length:%u compl_code:0x%x status:0x%x\n",
1366 compl_event->length, compl_event->code,
1367 compl_event->status);
1368 tx_cb_param->length = compl_event->length;
1369 tx_cb_param->completion_code = compl_event->code;
1370 tx_cb_param->status = compl_event->status;
1371 }
1372
1373 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1374 vchan_cookie_complete(vd);
1375 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1376}
1377
1378/* process all events */
1379static void gpi_process_events(struct gpii *gpii)
1380{
1381 struct gpi_ring *ev_ring = &gpii->ev_ring;
1382 u32 cntxt_rp, local_rp;
1383 union gpi_event *gpi_event;
1384 struct gpii_chan *gpii_chan;
1385 u32 chid, type;
1386 u32 ieob_irq;
1387
1388 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1389 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1390
1391 GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp: 0x08%x local_rp:0x08%x\n",
1392 cntxt_rp, local_rp);
1393
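	/*
	 * Drain the ring, then re-read the hardware RP and loop again if more
	 * events arrived while this batch was being processed.
	 */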
1394 do {
1395 while (local_rp != cntxt_rp) {
1396 gpi_event = ev_ring->rp;
1397 chid = gpi_event->xfer_compl_event.chid;
1398 type = gpi_event->xfer_compl_event.type;
1399 GPII_VERB(gpii, GPI_DBG_COMMON,
1400 "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
1401 local_rp, chid, type,
1402 gpi_event->gpi_ere.dword[0],
1403 gpi_event->gpi_ere.dword[1],
1404 gpi_event->gpi_ere.dword[2],
1405 gpi_event->gpi_ere.dword[3]);
1406
1407 switch (type) {
1408 case XFER_COMPLETE_EV_TYPE:
1409 gpii_chan = &gpii->gpii_chan[chid];
1410 gpi_process_xfer_compl_event(gpii_chan,
1411 &gpi_event->xfer_compl_event);
1412 break;
1413 case STALE_EV_TYPE:
1414 GPII_VERB(gpii, GPI_DBG_COMMON,
1415 "stale event, not processing\n");
1416 break;
1417 case IMMEDIATE_DATA_EV_TYPE:
1418 gpii_chan = &gpii->gpii_chan[chid];
1419 gpi_process_imed_data_event(gpii_chan,
1420 &gpi_event->immediate_data_event);
1421 break;
1422 case QUP_NOTIF_EV_TYPE:
1423 gpii_chan = &gpii->gpii_chan[chid];
1424 gpi_process_qup_notif_event(gpii_chan,
1425 &gpi_event->qup_notif_event);
1426 break;
1427 default:
1428 GPII_VERB(gpii, GPI_DBG_COMMON,
1429 "unsupported event type:0x%x\n",
1430 type);
1431 }
1432 gpi_ring_recycle_ev_element(ev_ring);
1433 local_rp = (u32)to_physical(ev_ring,
1434 (void *)ev_ring->rp);
1435 }
1436 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1437
1438 /* clear pending IEOB events */
1439 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
1440 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
1441
1442 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1443 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1444
1445 } while (cntxt_rp != local_rp);
1446
1447 GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
1448 local_rp);
1449}
1450
1451/* processing events using tasklet */
1452static void gpi_ev_tasklet(unsigned long data)
1453{
1454 struct gpii *gpii = (struct gpii *)data;
1455
1456 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1457
1458 read_lock_bh(&gpii->pm_lock);
1459 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1460 read_unlock_bh(&gpii->pm_lock);
1461 GPII_ERR(gpii, GPI_DBG_COMMON,
1462 "not processing any events, pm_state:%s\n",
1463 TO_GPI_PM_STR(gpii->pm_state));
1464 return;
1465 }
1466
1467 /* process the events */
1468 gpi_process_events(gpii);
1469
1470 /* enable IEOB, switching back to interrupts */
1471 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1472 read_unlock_bh(&gpii->pm_lock);
1473
1474 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1475}
1476
1477/* marks all pending events for the channel as stale */
1478void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
1479{
1480 struct gpii *gpii = gpii_chan->gpii;
1481 struct gpi_ring *ev_ring = &gpii->ev_ring;
1482 void *ev_rp;
1483 u32 cntxt_rp, local_rp;
1484
1485 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1486 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1487
1488 ev_rp = ev_ring->rp;
1489 local_rp = (u32)to_physical(ev_ring, ev_rp);
1490 while (local_rp != cntxt_rp) {
1491 union gpi_event *gpi_event = ev_rp;
1492 u32 chid = gpi_event->xfer_compl_event.chid;
1493
1494 if (chid == gpii_chan->chid)
1495 gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1496 ev_rp += ev_ring->el_size;
1497 if (ev_rp >= (ev_ring->base + ev_ring->len))
1498 ev_rp = ev_ring->base;
1499 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1500 local_rp = (u32)to_physical(ev_ring, ev_rp);
1501 }
1502}
1503
1504/* reset sw state and issue channel reset or de-alloc */
1505static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
1506{
1507 struct gpii *gpii = gpii_chan->gpii;
1508 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1509 unsigned long flags;
1510 LIST_HEAD(list);
1511 int ret;
1512
1513 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1514 ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
1515 if (ret) {
1516 GPII_ERR(gpii, gpii_chan->chid,
1517 "Error with cmd:%s ret:%d\n",
1518 TO_GPI_CMD_STR(gpi_cmd), ret);
1519 return ret;
1520 }
1521
1522 /* initialize the local ring ptrs */
1523 ch_ring->rp = ch_ring->base;
1524 ch_ring->wp = ch_ring->base;
1525
1526 /* visible to other cores */
1527 smp_wmb();
1528
1529 /* check event ring for any stale events */
1530 write_lock_irq(&gpii->pm_lock);
1531 gpi_mark_stale_events(gpii_chan);
1532
1533 /* remove all async descriptors */
1534 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1535 vchan_get_all_descriptors(&gpii_chan->vc, &list);
1536 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1537 write_unlock_irq(&gpii->pm_lock);
1538 vchan_dma_desc_free_list(&gpii_chan->vc, &list);
1539
1540 return 0;
1541}
1542
1543static int gpi_start_chan(struct gpii_chan *gpii_chan)
1544{
1545 struct gpii *gpii = gpii_chan->gpii;
1546 int ret;
1547
1548 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1549
1550 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
1551 if (ret) {
1552 GPII_ERR(gpii, gpii_chan->chid,
1553 "Error with cmd:%s ret:%d\n",
1554 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1555 return ret;
1556 }
1557
1558 /* gpii CH is active now */
1559 write_lock_irq(&gpii->pm_lock);
1560 gpii_chan->pm_state = ACTIVE_STATE;
1561 write_unlock_irq(&gpii->pm_lock);
1562
1563 return 0;
1564}
1565
1566/* allocate and configure the transfer channel */
1567static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
1568{
1569 struct gpii *gpii = gpii_chan->gpii;
1570 struct gpi_ring *ring = &gpii_chan->ch_ring;
1571 int i;
1572 int ret;
1573 struct {
1574 void *base;
1575 int offset;
1576 u32 val;
1577 } ch_reg[] = {
1578 {
1579 gpii_chan->ch_cntxt_base_reg,
1580 CNTXT_0_CONFIG,
1581 GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
1582 gpii_chan->dir,
1583 GPI_CHTYPE_PROTO_GPI),
1584 },
1585 {
1586 gpii_chan->ch_cntxt_base_reg,
1587 CNTXT_1_R_LENGTH,
1588 ring->len,
1589 },
1590 {
1591 gpii_chan->ch_cntxt_base_reg,
1592 CNTXT_2_RING_BASE_LSB,
1593 (u32)ring->phys_addr,
1594 },
1595 {
1596 gpii_chan->ch_cntxt_base_reg,
1597 CNTXT_3_RING_BASE_MSB,
1598 (u32)(ring->phys_addr >> 32),
1599 },
1600 { /* program MSB of DB register with ring base */
1601 gpii_chan->ch_cntxt_db_reg,
1602 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1603 (u32)(ring->phys_addr >> 32),
1604 },
1605 {
1606 gpii->regs,
1607 GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
1608 gpii_chan->chid),
1609 GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
1610 gpii_chan->protocol,
1611 gpii_chan->seid),
1612 },
1613 {
1614 gpii->regs,
1615 GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
1616 gpii_chan->chid),
1617 0,
1618 },
1619 {
1620 gpii->regs,
1621 GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
1622 gpii_chan->chid),
1623 0,
1624 },
1625 {
1626 gpii->regs,
1627 GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
1628 gpii_chan->chid),
1629 0,
1630 },
1631 {
1632 gpii->regs,
1633 GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
1634 gpii_chan->chid),
1635 1,
1636 },
1637 { NULL },
1638 };
1639
1640 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1641
1642 if (send_alloc_cmd) {
1643 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
1644 if (ret) {
1645 GPII_ERR(gpii, gpii_chan->chid,
1646 "Error with cmd:%s ret:%d\n",
1647 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1648 return ret;
1649 }
1650 }
1651
1652 /* program channel cntxt registers */
1653 for (i = 0; ch_reg[i].base; i++)
1654 gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
1655 ch_reg[i].val);
1656 /* flush all the writes */
1657 wmb();
1658 return 0;
1659}
1660
1661/* allocate and configure event ring */
1662static int gpi_alloc_ev_chan(struct gpii *gpii)
1663{
1664 struct gpi_ring *ring = &gpii->ev_ring;
1665 int i;
1666 int ret;
1667 struct {
1668 void *base;
1669 int offset;
1670 u32 val;
1671 } ev_reg[] = {
1672 {
1673 gpii->ev_cntxt_base_reg,
1674 CNTXT_0_CONFIG,
1675 GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
1676 GPI_INTTYPE_IRQ,
1677 GPI_CHTYPE_GPI_EV),
1678 },
1679 {
1680 gpii->ev_cntxt_base_reg,
1681 CNTXT_1_R_LENGTH,
1682 ring->len,
1683 },
1684 {
1685 gpii->ev_cntxt_base_reg,
1686 CNTXT_2_RING_BASE_LSB,
1687 (u32)ring->phys_addr,
1688 },
1689 {
1690 gpii->ev_cntxt_base_reg,
1691 CNTXT_3_RING_BASE_MSB,
1692 (u32)(ring->phys_addr >> 32),
1693 },
1694 {
1695 /* program db msg with ring base msb */
1696 gpii->ev_cntxt_db_reg,
1697 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1698 (u32)(ring->phys_addr >> 32),
1699 },
1700 {
1701 gpii->ev_cntxt_base_reg,
1702 CNTXT_8_RING_INT_MOD,
1703 0,
1704 },
1705 {
1706 gpii->ev_cntxt_base_reg,
1707 CNTXT_10_RING_MSI_LSB,
1708 0,
1709 },
1710 {
1711 gpii->ev_cntxt_base_reg,
1712 CNTXT_11_RING_MSI_MSB,
1713 0,
1714 },
1715 {
1716 gpii->ev_cntxt_base_reg,
1717 CNTXT_8_RING_INT_MOD,
1718 0,
1719 },
1720 {
1721 gpii->ev_cntxt_base_reg,
1722 CNTXT_12_RING_RP_UPDATE_LSB,
1723 0,
1724 },
1725 {
1726 gpii->ev_cntxt_base_reg,
1727 CNTXT_13_RING_RP_UPDATE_MSB,
1728 0,
1729 },
1730 { NULL },
1731 };
1732
1733 GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
1734
1735 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1736 if (ret) {
1737 GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
1738 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1739 return ret;
1740 }
1741
1742 /* program event context */
1743 for (i = 0; ev_reg[i].base; i++)
1744 gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
1745 ev_reg[i].val);
1746
1747 /* add events to ring */
1748 ring->wp = (ring->base + ring->len - ring->el_size);
1749
1750 /* flush all the writes */
1751 wmb();
1752
1753 /* gpii is active now */
1754 write_lock_irq(&gpii->pm_lock);
1755 gpii->pm_state = ACTIVE_STATE;
1756 write_unlock_irq(&gpii->pm_lock);
1757 gpi_write_ev_db(gpii, ring, ring->wp);
1758
1759 return 0;
1760}
1761
1762/* calculate # of ERE/TRE available to queue */
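/*
 * One slot is always kept unused so that a full ring (wp just behind rp) can
 * be distinguished from an empty one (wp == rp); hence the "- 1" terms below.
 */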
1763static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1764{
1765 int elements = 0;
1766
1767 if (ring->wp < ring->rp)
1768 elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1769 else {
1770 elements = (ring->rp - ring->base) / ring->el_size;
1771 elements += ((ring->base + ring->len - ring->wp) /
1772 ring->el_size) - 1;
1773 }
1774
1775 return elements;
1776}
1777
1778static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1779{
1780
1781 if (gpi_ring_num_elements_avail(ring) <= 0)
1782 return -ENOMEM;
1783
1784 *wp = ring->wp;
1785 ring->wp += ring->el_size;
1786 if (ring->wp >= (ring->base + ring->len))
1787 ring->wp = ring->base;
1788
1789 /* visible to other cores */
1790 smp_wmb();
1791
1792 return 0;
1793}
1794
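/*
 * An event element has been consumed: advance both rp and wp so the slot is
 * handed back to the hardware for reuse.
 */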
1795static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1796{
1797 /* Update the WP */
1798 ring->wp += ring->el_size;
1799 if (ring->wp >= (ring->base + ring->len))
1800 ring->wp = ring->base;
1801
1802 /* Update the RP */
1803 ring->rp += ring->el_size;
1804 if (ring->rp >= (ring->base + ring->len))
1805 ring->rp = ring->base;
1806
1807 /* visible to other cores */
1808 smp_wmb();
1809}
1810
1811static void gpi_free_ring(struct gpi_ring *ring,
1812 struct gpii *gpii)
1813{
1814 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1815 ring->pre_aligned, ring->dma_handle);
1816 memset(ring, 0, sizeof(*ring));
1817}
1818
1819/* allocate memory for transfer and event rings */
1820static int gpi_alloc_ring(struct gpi_ring *ring,
1821 u32 elements,
1822 u32 el_size,
1823 struct gpii *gpii)
1824{
1825 u64 len = elements * el_size;
1826 int bit;
1827
1828 /* ring len must be power of 2 */
1829 bit = find_last_bit((unsigned long *)&len, 32);
1830 if (((1 << bit) - 1) & len)
1831 bit++;
1832 len = 1 << bit;
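	/*
	 * The ring base must be aligned to its power-of-2 length, so
	 * over-allocate by len - 1 bytes and carve an aligned region out of
	 * the returned buffer (see the phys_addr alignment below).
	 */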
1833 ring->alloc_size = (len + (len - 1));
1834 GPII_INFO(gpii, GPI_DBG_COMMON,
1835 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
1836 elements, el_size, (elements * el_size), len,
1837 ring->alloc_size);
1838 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1839 ring->alloc_size,
1840 &ring->dma_handle, GFP_KERNEL);
1841 if (!ring->pre_aligned) {
1842 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1843 "could not alloc size:%lu mem for ring\n",
1844 ring->alloc_size);
1845 return -ENOMEM;
1846 }
1847
1848 /* align the physical mem */
1849 ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1850 ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
1851 ring->rp = ring->base;
1852 ring->wp = ring->base;
1853 ring->len = len;
1854 ring->el_size = el_size;
1855 ring->elements = ring->len / ring->el_size;
1856 memset(ring->base, 0, ring->len);
1857 ring->configured = true;
1858
1859 /* update to other cores */
1860 smp_wmb();
1861
1862 GPII_INFO(gpii, GPI_DBG_COMMON,
1863 "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
1864 ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
1865 ring->elements);
1866
1867 return 0;
1868}
1869
1870/* copy tre into transfer ring */
1871static void gpi_queue_xfer(struct gpii *gpii,
1872 struct gpii_chan *gpii_chan,
1873 struct msm_gpi_tre *gpi_tre,
1874 void **wp)
1875{
1876 struct msm_gpi_tre *ch_tre;
1877 int ret;
1878
1879 /* get next tre location we can copy */
1880 ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
1881 if (unlikely(ret)) {
1882 GPII_CRITIC(gpii, gpii_chan->chid,
1883 "Error adding ring element to xfer ring\n");
1884 return;
1885 }
1886
1887 /* copy the tre info */
1888 memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
1889 *wp = ch_tre;
1890}
1891
1892/* reset and restart transfer channel */
1893int gpi_terminate_all(struct dma_chan *chan)
1894{
1895 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1896 struct gpii *gpii = gpii_chan->gpii;
1897 int schid, echid, i;
1898 int ret = 0;
1899
1900 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1901 mutex_lock(&gpii->ctrl_lock);
1902
1903 /*
1904 * treat both channels as a group if the protocol is not UART;
1905 * STOP, RESET, and START need to be issued in lockstep
1906 */
1907 schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
1908 echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
1909 MAX_CHANNELS_PER_GPII;
1910
1911 /* stop the channel */
1912 for (i = schid; i < echid; i++) {
1913 gpii_chan = &gpii->gpii_chan[i];
1914
1915 /* disable ch state so no more TRE processing */
1916 write_lock_irq(&gpii->pm_lock);
1917 gpii_chan->pm_state = PREPARE_TERMINATE;
1918 write_unlock_irq(&gpii->pm_lock);
1919
1920 /* send command to Stop the channel */
1921 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
1922 if (ret)
1923 GPII_ERR(gpii, gpii_chan->chid,
1924 "Error Stopping Channel:%d resetting anyway\n",
1925 ret);
1926 }
1927
1928 /* reset the channels (clears any pending tre) */
1929 for (i = schid; i < echid; i++) {
1930 gpii_chan = &gpii->gpii_chan[i];
1931
1932 ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
1933 if (ret) {
1934 GPII_ERR(gpii, gpii_chan->chid,
1935 "Error resetting channel ret:%d\n", ret);
1936 goto terminate_exit;
1937 }
1938
1939 /* reprogram channel CNTXT */
1940 ret = gpi_alloc_chan(gpii_chan, false);
1941 if (ret) {
1942 GPII_ERR(gpii, gpii_chan->chid,
1943 "Error alloc_channel ret:%d\n", ret);
1944 goto terminate_exit;
1945 }
1946 }
1947
1948 /* restart the channels */
1949 for (i = schid; i < echid; i++) {
1950 gpii_chan = &gpii->gpii_chan[i];
1951
1952 ret = gpi_start_chan(gpii_chan);
1953 if (ret) {
1954 GPII_ERR(gpii, gpii_chan->chid,
1955 "Error Starting Channel ret:%d\n", ret);
1956 goto terminate_exit;
1957 }
1958 }
1959
1960terminate_exit:
1961 mutex_unlock(&gpii->ctrl_lock);
1962 return ret;
1963}
1964
1965/* pause dma transfer for all channels */
1966static int gpi_pause(struct dma_chan *chan)
1967{
1968 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1969 struct gpii *gpii = gpii_chan->gpii;
1970 int i, ret;
1971
1972 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1973 mutex_lock(&gpii->ctrl_lock);
1974
1975 /*
1976 * pause/resume are per gpii not per channel, so
1977 * client needs to call pause only once
1978 */
1979 if (gpii->pm_state == PAUSE_STATE) {
1980 GPII_INFO(gpii, gpii_chan->chid,
1981 "channel is already paused\n");
1982 mutex_unlock(&gpii->ctrl_lock);
1983 return 0;
1984 }
1985
1986 /* send stop command to stop the channels */
1987 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
1988 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
1989 if (ret) {
1990 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
1991 "Error stopping chan, ret:%d\n", ret);
1992 mutex_unlock(&gpii->ctrl_lock);
1993 return ret;
1994 }
1995 }
1996
1997 disable_irq(gpii->irq);
1998
1999 /* wait for in-flight event processing to complete */
2000 tasklet_kill(&gpii->ev_task);
2001
2002 write_lock_irq(&gpii->pm_lock);
2003 gpii->pm_state = PAUSE_STATE;
2004 write_unlock_irq(&gpii->pm_lock);
2005 mutex_unlock(&gpii->ctrl_lock);
2006
2007 return 0;
2008}
2009
2010/* resume dma transfer */
2011static int gpi_resume(struct dma_chan *chan)
2012{
2013 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2014 struct gpii *gpii = gpii_chan->gpii;
2015 int i;
2016 int ret;
2017
2018 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2019
2020 mutex_lock(&gpii->ctrl_lock);
2021 if (gpii->pm_state == ACTIVE_STATE) {
2022 GPII_INFO(gpii, gpii_chan->chid,
2023 "channel is already active\n");
2024 mutex_unlock(&gpii->ctrl_lock);
2025 return 0;
2026 }
2027
2028 enable_irq(gpii->irq);
2029
2030 /* send start command to start the channels */
2031 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2032 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
2033 if (ret) {
2034 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2035 "Erro starting chan, ret:%d\n", ret);
2036 mutex_unlock(&gpii->ctrl_lock);
2037 return ret;
2038 }
2039 }
2040
2041 write_lock_irq(&gpii->pm_lock);
2042 gpii->pm_state = ACTIVE_STATE;
2043 write_unlock_irq(&gpii->pm_lock);
2044 mutex_unlock(&gpii->ctrl_lock);
2045
2046 return 0;
2047}
2048
2049void gpi_desc_free(struct virt_dma_desc *vd)
2050{
2051 struct gpi_desc *gpi_desc = to_gpi_desc(vd);
2052
2053 kfree(gpi_desc);
2054}
2055
2056/* prepare a descriptor and copy client TREs into the transfer ring */
2057struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
2058 struct scatterlist *sgl,
2059 unsigned int sg_len,
2060 enum dma_transfer_direction direction,
2061 unsigned long flags,
2062 void *context)
2063{
2064 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2065 struct gpii *gpii = gpii_chan->gpii;
2066 u32 nr;
2067 u32 nr_req = 0;
2068 int i, j;
2069 struct scatterlist *sg;
2070 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
2071 void *tre, *wp = NULL;
2072 const gfp_t gfp = GFP_ATOMIC;
2073 struct gpi_desc *gpi_desc;
2074
2075 GPII_VERB(gpii, gpii_chan->chid, "enter\n");
2076
2077 if (!is_slave_direction(direction)) {
2078 GPII_ERR(gpii, gpii_chan->chid,
2079 "invalid dma direction: %d\n", direction);
2080 return NULL;
2081 }
2082
2083 /* calculate # of elements required & available */
2084 nr = gpi_ring_num_elements_avail(ch_ring);
2085 for_each_sg(sgl, sg, sg_len, i) {
2086 GPII_VERB(gpii, gpii_chan->chid,
2087 "%d of %u len:%u\n", i, sg_len, sg->length);
2088 nr_req += (sg->length / ch_ring->el_size);
2089 }
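	/*
	 * Illustrative note: each scatterlist entry is expected to hold
	 * whole TREs, so an sg entry of length 48 with an assumed 16-byte
	 * TRE size contributes 3 ring elements to nr_req.
	 */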
2090 GPII_VERB(gpii, gpii_chan->chid, "el avail:%u req:%u\n", nr, nr_req);
2091
2092 if (nr < nr_req) {
2093 GPII_ERR(gpii, gpii_chan->chid,
2094 "not enough space in ring, avail:%u required:%u\n",
2095 nr, nr_req);
2096 return NULL;
2097 }
2098
2099 gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
2100 if (!gpi_desc) {
2101 GPII_ERR(gpii, gpii_chan->chid,
2102 "out of memory for descriptor\n");
2103 return NULL;
2104 }
2105
2106 /* copy each tre into transfer ring */
2107 for_each_sg(sgl, sg, sg_len, i)
2108 for (j = 0, tre = sg_virt(sg); j < sg->length;
2109 j += ch_ring->el_size, tre += ch_ring->el_size)
2110 gpi_queue_xfer(gpii, gpii_chan, tre, &wp);
2111
2112 /* set up the descriptor */
2113 gpi_desc->db = ch_ring->wp;
2114 gpi_desc->wp = wp;
2115 gpi_desc->gpii_chan = gpii_chan;
2116 GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
2117 to_physical(ch_ring, ch_ring->wp),
2118 to_physical(ch_ring, ch_ring->rp));
2119
2120 return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
2121}
2122
2123/* ring the transfer ring doorbell to begin the transfer */
2124static void gpi_issue_pending(struct dma_chan *chan)
2125{
2126 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2127 struct gpii *gpii = gpii_chan->gpii;
2128 unsigned long flags, pm_lock_flags;
2129 struct virt_dma_desc *vd = NULL;
2130 struct gpi_desc *gpi_desc;
2131
2132 GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
2133
2134 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
2135
2136 /* move all submitted descriptors to the issued list */
2137 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
2138 if (vchan_issue_pending(&gpii_chan->vc))
2139 vd = list_last_entry(&gpii_chan->vc.desc_issued,
2140 struct virt_dma_desc, node);
2141 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
2142
2143 /* nothing to do, issued list is empty */
2144 if (!vd) {
2145 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2146 GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
2147 return;
2148 }
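	/*
	 * Ring the doorbell with the wp recorded in the most recently
	 * issued descriptor; this publishes every TRE queued up to that
	 * point to the hardware in a single doorbell write.
	 */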
2149
2150 gpi_desc = to_gpi_desc(vd);
2151 gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
2152 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2153}
2154
2155/* configure or issue async command */
2156static int gpi_config(struct dma_chan *chan,
2157 struct dma_slave_config *config)
2158{
2159 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2160 struct gpii *gpii = gpii_chan->gpii;
2161 struct msm_gpi_ctrl *gpi_ctrl = chan->private;
2162 const int ev_factor = gpii->gpi_dev->ev_factor;
2163 u32 elements;
2164 int i = 0;
2165 int ret = 0;
2166
2167 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2168 if (!gpi_ctrl) {
2169 GPII_ERR(gpii, gpii_chan->chid,
2170 "no config ctrl data provided");
2171 return -EINVAL;
2172 }
2173
2174 mutex_lock(&gpii->ctrl_lock);
2175
2176 switch (gpi_ctrl->cmd) {
2177 case MSM_GPI_INIT:
2178 GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
2179
2180 gpii_chan->client_info.callback = gpi_ctrl->init.callback;
2181 gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
2182 gpii_chan->pm_state = CONFIG_STATE;
2183
2184 /* check if both channels are configured before continuing */
2185 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2186 if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
2187 goto exit_gpi_init;
2188
2189 /* configure to the highest priority of the two channels */
2190 gpii->ev_priority = min(gpii->gpii_chan[0].priority,
2191 gpii->gpii_chan[1].priority);
2192
2193 /* protocol must be the same for both channels */
2194 if (gpii->gpii_chan[0].protocol !=
2195 gpii->gpii_chan[1].protocol) {
2196 GPII_ERR(gpii, gpii_chan->chid,
2197 "protocol did not match protocol %u != %u\n",
2198 gpii->gpii_chan[0].protocol,
2199 gpii->gpii_chan[1].protocol);
2200 ret = -EINVAL;
2201 goto exit_gpi_init;
2202 }
2203 gpii->protocol = gpii_chan->protocol;
2204
2205 /* allocate memory for event ring */
2206 elements = max(gpii->gpii_chan[0].req_tres,
2207 gpii->gpii_chan[1].req_tres);
2208 ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
2209 sizeof(union gpi_event), gpii);
2210 if (ret) {
2211 GPII_ERR(gpii, gpii_chan->chid,
2212 "error allocating mem for ev ring\n");
2213 goto exit_gpi_init;
2214 }
2215
2216 /* configure interrupts */
2217 write_lock_irq(&gpii->pm_lock);
2218 gpii->pm_state = PREPARE_HARDWARE;
2219 write_unlock_irq(&gpii->pm_lock);
2220 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
2221 if (ret) {
2222 GPII_ERR(gpii, gpii_chan->chid,
2223 "error config. interrupts, ret:%d\n", ret);
2224 goto error_config_int;
2225 }
2226
2227 /* allocate event rings */
2228 ret = gpi_alloc_ev_chan(gpii);
2229 if (ret) {
2230 GPII_ERR(gpii, gpii_chan->chid,
2231 "error alloc_ev_chan:%d\n", ret);
2232 goto error_alloc_ev_ring;
2233 }
2234
2235 /* Allocate all channels */
2236 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2237 ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
2238 if (ret) {
2239 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2240 "Error allocating chan:%d\n", ret);
2241 goto error_alloc_chan;
2242 }
2243 }
2244
2245 /* start channels */
2246 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2247 ret = gpi_start_chan(&gpii->gpii_chan[i]);
2248 if (ret) {
2249 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2250 "Error start chan:%d\n", ret);
2251 goto error_start_chan;
2252 }
2253 }
2254
2255 break;
2256 case MSM_GPI_CMD_UART_SW_STALE:
2257 GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
2258 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
2259 break;
2260 case MSM_GPI_CMD_UART_RFR_READY:
2261 GPII_INFO(gpii, gpii_chan->chid,
2262 "sending UART RFR READY cmd\n");
2263 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
2264 break;
2265 case MSM_GPI_CMD_UART_RFR_NOT_READY:
2266 GPII_INFO(gpii, gpii_chan->chid,
2267 "sending UART RFR READY NOT READY cmd\n");
2268 ret = gpi_send_cmd(gpii, gpii_chan,
2269 GPI_CH_CMD_UART_RFR_NOT_READY);
2270 break;
2271 default:
2272 GPII_ERR(gpii, gpii_chan->chid,
2273 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
2274 ret = -EINVAL;
2275 }
2276
2277 mutex_unlock(&gpii->ctrl_lock);
2278 return ret;
2279
2280error_start_chan:
2281 for (i = i - 1; i >= 0; i--) {
2282 gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2283 gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2284 }
2285 i = 2;
2286error_alloc_chan:
2287 for (i = i - 1; i >= 0; i--)
2288 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2289error_alloc_ev_ring:
2290 gpi_disable_interrupts(gpii);
2291error_config_int:
2292 gpi_free_ring(&gpii->ev_ring, gpii);
2293exit_gpi_init:
2294 mutex_unlock(&gpii->ctrl_lock);
2295 return ret;
2296}
2297
2298/* release all channel resources */
2299static void gpi_free_chan_resources(struct dma_chan *chan)
2300{
2301 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2302 struct gpii *gpii = gpii_chan->gpii;
2303 enum gpi_pm_state cur_state;
2304 int ret, i;
2305
2306 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2307
2308 mutex_lock(&gpii->ctrl_lock);
2309
2310 cur_state = gpii_chan->pm_state;
2311
2312 /* disable ch state so no more TRE processing for this channel */
2313 write_lock_irq(&gpii->pm_lock);
2314 gpii_chan->pm_state = PREPARE_TERMINATE;
2315 write_unlock_irq(&gpii->pm_lock);
2316
2317 /* attempt a graceful hardware shutdown */
2318 if (cur_state == ACTIVE_STATE) {
2319 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2320 if (ret)
2321 GPII_ERR(gpii, gpii_chan->chid,
2322 "error stopping channel:%d\n", ret);
2323
2324 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2325 if (ret)
2326 GPII_ERR(gpii, gpii_chan->chid,
2327 "error resetting channel:%d\n", ret);
2328
2329 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2330 }
2331
2332 /* free all allocated memory */
2333 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2334 vchan_free_chan_resources(&gpii_chan->vc);
2335
2336 write_lock_irq(&gpii->pm_lock);
2337 gpii_chan->pm_state = DISABLE_STATE;
2338 write_unlock_irq(&gpii->pm_lock);
2339
2340 /* if other channel rings are still active, exit */
2341 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2342 if (gpii->gpii_chan[i].ch_ring.configured)
2343 goto exit_free;
2344
2345 GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
2346
2347 /* deallocate EV Ring */
2348 cur_state = gpii->pm_state;
2349 write_lock_irq(&gpii->pm_lock);
2350 gpii->pm_state = PREPARE_TERMINATE;
2351 write_unlock_irq(&gpii->pm_lock);
2352
2353 /* wait for in-flight event processing to complete */
2354 tasklet_kill(&gpii->ev_task);
2355
2356 /* send command to de-allocate the event ring */
2357 if (cur_state == ACTIVE_STATE)
2358 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2359
2360 gpi_free_ring(&gpii->ev_ring, gpii);
2361
2362 /* disable interrupts */
2363 if (cur_state == ACTIVE_STATE)
2364 gpi_disable_interrupts(gpii);
2365
2366 /* set final state to disable */
2367 write_lock_irq(&gpii->pm_lock);
2368 gpii->pm_state = DISABLE_STATE;
2369 write_unlock_irq(&gpii->pm_lock);
2370
2371exit_free:
2372 mutex_unlock(&gpii->ctrl_lock);
2373}
2374
2375/* allocate channel resources */
2376static int gpi_alloc_chan_resources(struct dma_chan *chan)
2377{
2378 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2379 struct gpii *gpii = gpii_chan->gpii;
2380 int ret;
2381
2382 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2383
2384 mutex_lock(&gpii->ctrl_lock);
2385
2386 /* allocate memory for transfer ring */
2387 ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
2388 sizeof(struct msm_gpi_tre), gpii);
2389 if (ret) {
2390 GPII_ERR(gpii, gpii_chan->chid,
2391 "error allocating xfer ring, ret:%d\n", ret);
2392 goto xfer_alloc_err;
2393 }
2394 mutex_unlock(&gpii->ctrl_lock);
2395
2396 return 0;
2397xfer_alloc_err:
2398 mutex_unlock(&gpii->ctrl_lock);
2399
2400 return ret;
2401}
2402
2403static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
2404{
2405 int gpii;
2406 struct gpii_chan *tx_chan, *rx_chan;
2407
2408 /* check if same seid is already configured for another chid */
2409 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2410 if (!((1 << gpii) & gpi_dev->gpii_mask))
2411 continue;
2412
2413 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2414 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2415
2416 if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
2417 return gpii;
2418 if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
2419 return gpii;
2420 }
2421
2422 /* no channels configured with same seid, return next avail gpii */
2423 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2424 if (!((1 << gpii) & gpi_dev->gpii_mask))
2425 continue;
2426
2427 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2428 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2429
2430 /* check if gpii is configured */
2431 if (tx_chan->vc.chan.client_count ||
2432 rx_chan->vc.chan.client_count)
2433 continue;
2434
2435 /* found a free gpii */
2436 return gpii;
2437 }
2438
2439 /* no gpii instance available to use */
2440 return -EIO;
2441}
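/*
 * Illustrative note: if serial engine 3 already owns the TX channel of
 * gpii 1, a later request for seid 3 (e.g. its RX channel) is routed back
 * to gpii 1 by the first loop above; only a previously unseen seid falls
 * through to the search for a completely unused gpii.
 */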
2442
2443/* gpi_of_dma_xlate: open client requested channel */
2444static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2445 struct of_dma *of_dma)
2446{
2447 struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
2448 u32 seid, chid;
2449 int gpii;
2450 struct gpii_chan *gpii_chan;
2451
2452 if (args->args_count < REQ_OF_DMA_ARGS) {
2453 GPI_ERR(gpi_dev,
2454 "gpii require minimum 6 args, client passed:%d args\n",
2455 args->args_count);
2456 return NULL;
2457 }
2458
2459 chid = args->args[0];
2460 if (chid >= MAX_CHANNELS_PER_GPII) {
2461 GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
2462 return NULL;
2463 }
2464
2465 seid = args->args[1];
2466
2467 /* find next available gpii to use */
2468 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2469 if (gpii < 0) {
2470 GPI_ERR(gpi_dev, "no available gpii instances\n");
2471 return NULL;
2472 }
2473
2474 gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
2475 if (gpii_chan->vc.chan.client_count) {
2476 GPI_ERR(gpi_dev, "gpii:%d chid:%d seid:%d already configured\n",
2477 gpii, chid, gpii_chan->seid);
2478 return NULL;
2479 }
2480
2481 /* get ring size, protocol, se_id, and priority */
2482 gpii_chan->seid = seid;
2483 gpii_chan->protocol = args->args[2];
2484 gpii_chan->req_tres = args->args[3];
2485 gpii_chan->priority = args->args[4];
2486
2487 GPI_LOG(gpi_dev,
2488 "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
2489 gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
2490 gpii_chan->protocol);
2491
2492 return dma_get_slave_channel(&gpii_chan->vc.chan);
2493}
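/*
 * Hypothetical client binding sketch (argument order taken from the
 * parsing above, values made up):
 *
 *	dmas = <&gpi_dma0 0 1 2 64 10>,
 *	       <&gpi_dma0 1 1 2 64 10>;
 *
 * i.e. <chid seid protocol #tres priority ...>; at least REQ_OF_DMA_ARGS
 * cells are required, and any remaining cells are not consumed here.
 */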
2494
2495/* gpi_setup_debug - setup debug capabilities */
2496static void gpi_setup_debug(struct gpi_dev *gpi_dev)
2497{
2498 char node_name[GPI_LABEL_SIZE];
2499 const umode_t mode = 0600;
2500 int i;
2501
2502 snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
2503 (u64)gpi_dev->res->start);
2504
2505 gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2506 node_name, 0);
2507 gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2508 if (!IS_ERR_OR_NULL(pdentry)) {
2509 snprintf(node_name, sizeof(node_name), "%llx",
2510 (u64)gpi_dev->res->start);
2511 gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
2512 if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
2513 debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
2514 &gpi_dev->ipc_log_lvl);
2515 debugfs_create_u32("klog_lvl", mode,
2516 gpi_dev->dentry, &gpi_dev->klog_lvl);
2517 }
2518 }
2519
2520 for (i = 0; i < gpi_dev->max_gpii; i++) {
2521 struct gpii *gpii;
2522
2523 if (!((1 << i) & gpi_dev->gpii_mask))
2524 continue;
2525
2526 gpii = &gpi_dev->gpiis[i];
2527 snprintf(gpii->label, sizeof(gpii->label),
2528 "%s%llx_gpii%d",
2529 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
2530 gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2531 gpii->label, 0);
2532 gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2533 gpii->klog_lvl = DEFAULT_KLOG_LVL;
2534
2535 if (IS_ERR_OR_NULL(gpi_dev->dentry))
2536 continue;
2537
2538 snprintf(node_name, sizeof(node_name), "gpii%d", i);
2539 gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
2540 if (IS_ERR_OR_NULL(gpii->dentry))
2541 continue;
2542
2543 debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
2544 &gpii->ipc_log_lvl);
2545 debugfs_create_u32("klog_lvl", mode, gpii->dentry,
2546 &gpii->klog_lvl);
2547 }
2548}
2549
2550static struct dma_iommu_mapping *gpi_create_mapping(struct gpi_dev *gpi_dev)
2551{
2552 dma_addr_t base;
2553 size_t size;
2554
2555 /*
2556 * If S1_BYPASS is enabled then the iommu address space is not used; however,
2557 * the framework still requires clients to create a mapping before attaching.
2558 * So set it to the smallest size required by the iommu framework.
2559 */
2560 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2561 base = 0;
2562 size = PAGE_SIZE;
2563 } else {
2564 base = gpi_dev->iova_base;
2565 size = gpi_dev->iova_size;
2566 }
2567
2568 GPI_LOG(gpi_dev, "Creating iommu mapping of base:0x%llx size:%lu\n",
2569 base, size);
2570
2571 return arm_iommu_create_mapping(&platform_bus_type, base, size);
2572}
2573
2574static int gpi_dma_mask(struct gpi_dev *gpi_dev)
2575{
2576 int mask = 64;
2577
2578 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2579 unsigned long addr;
2580
2581 addr = gpi_dev->iova_base + gpi_dev->iova_size + 1;
2582 mask = find_last_bit(&addr, 64);
2583 }
2584
2585 GPI_LOG(gpi_dev, "Setting dma mask to %d\n", mask);
2586
2587 return dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(mask));
2588}
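/*
 * Illustrative note with assumed numbers: for iova_base = 0x0 and
 * iova_size = 0xffffffff, base + size + 1 is 2^32, find_last_bit()
 * returns 32 and the DMA mask becomes DMA_BIT_MASK(32); with no SMMU
 * config or with S1 bypass the default 64-bit mask is kept.
 */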
2589
2590static int gpi_smmu_init(struct gpi_dev *gpi_dev)
2591{
2592 struct dma_iommu_mapping *mapping = NULL;
2593 int ret;
2594
2595 if (gpi_dev->smmu_cfg) {
2596
2597 /* create mapping table */
2598 mapping = gpi_create_mapping(gpi_dev);
2599 if (IS_ERR(mapping)) {
2600 GPI_ERR(gpi_dev,
2601 "Failed to create iommu mapping, ret:%ld\n",
2602 PTR_ERR(mapping));
2603 return PTR_ERR(mapping);
2604 }
2605
2606 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2607 int s1_bypass = 1;
2608
2609 ret = iommu_domain_set_attr(mapping->domain,
2610 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
2611 if (ret) {
2612 GPI_ERR(gpi_dev,
2613 "Failed to set attr S1_BYPASS, ret:%d\n",
2614 ret);
2615 goto release_mapping;
2616 }
2617 }
2618
2619 if (gpi_dev->smmu_cfg & GPI_SMMU_FAST) {
2620 int fast = 1;
2621
2622 ret = iommu_domain_set_attr(mapping->domain,
2623 DOMAIN_ATTR_FAST, &fast);
2624 if (ret) {
2625 GPI_ERR(gpi_dev,
2626 "Failed to set attr FAST, ret:%d\n",
2627 ret);
2628 goto release_mapping;
2629 }
2630 }
2631
2632 if (gpi_dev->smmu_cfg & GPI_SMMU_ATOMIC) {
2633 int atomic = 1;
2634
2635 ret = iommu_domain_set_attr(mapping->domain,
2636 DOMAIN_ATTR_ATOMIC, &atomic);
2637 if (ret) {
2638 GPI_ERR(gpi_dev,
2639 "Failed to set attr ATOMIC, ret:%d\n",
2640 ret);
2641 goto release_mapping;
2642 }
2643 }
2644
2645 ret = arm_iommu_attach_device(gpi_dev->dev, mapping);
2646 if (ret) {
2647 GPI_ERR(gpi_dev,
2648 "Failed with iommu_attach, ret:%d\n", ret);
2649 goto release_mapping;
2650 }
2651 }
2652
2653 ret = gpi_dma_mask(gpi_dev);
2654 if (ret) {
2655 GPI_ERR(gpi_dev, "Error setting dma_mask, ret:%d\n", ret);
2656 goto error_set_mask;
2657 }
2658
2659 return ret;
2660
2661error_set_mask:
2662 if (gpi_dev->smmu_cfg)
2663 arm_iommu_detach_device(gpi_dev->dev);
2664release_mapping:
2665 if (mapping)
2666 arm_iommu_release_mapping(mapping);
2667 return ret;
2668}
2669
2670static int gpi_probe(struct platform_device *pdev)
2671{
2672 struct gpi_dev *gpi_dev;
2673 int ret, i;
2674
2675 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2676 if (!gpi_dev)
2677 return -ENOMEM;
2678
2679 gpi_dev->dev = &pdev->dev;
2680 gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
2681 gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2682 "gpi-top");
2683 if (!gpi_dev->res) {
2684 GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
2685 return -EINVAL;
2686 }
2687 gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
2688 resource_size(gpi_dev->res));
2689 if (!gpi_dev->regs) {
2690 GPI_ERR(gpi_dev, "IO remap failed\n");
2691 return -EFAULT;
2692 }
2693
2694 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
2695 &gpi_dev->max_gpii);
2696 if (ret) {
2697 GPI_ERR(gpi_dev, "missing 'qcom,max-num-gpii' DT node\n");
2698 return ret;
2699 }
2700
2701 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
2702 &gpi_dev->gpii_mask);
2703 if (ret) {
2704 GPI_ERR(gpi_dev, "missing 'qcom,gpii-mask' DT node\n");
2705 return ret;
2706 }
2707
2708 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
2709 &gpi_dev->ev_factor);
2710 if (ret) {
2711 GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
2712 return ret;
2713 }
2714
2715 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,smmu-cfg",
2716 &gpi_dev->smmu_cfg);
2717 if (ret) {
2718 GPI_ERR(gpi_dev, "missing 'qcom,smmu-cfg' DT node\n");
2719 return ret;
2720 }
2721 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2722 u64 iova_range[2];
2723
2724 ret = of_property_count_elems_of_size(gpi_dev->dev->of_node,
2725 "qcom,iova-range",
2726 sizeof(iova_range));
2727 if (ret != 1) {
2728 GPI_ERR(gpi_dev,
2729 "missing or incorrect 'qcom,iova-range' DT node ret:%d\n",
2730 ret);
2731 }
2732
2733 ret = of_property_read_u64_array(gpi_dev->dev->of_node,
2734 "qcom,iova-range", iova_range,
2735 sizeof(iova_range) / sizeof(u64));
2736 if (ret) {
2737 GPI_ERR(gpi_dev,
2738 "could not read DT prop 'qcom,iova-range\n");
2739 return ret;
2740 }
2741 gpi_dev->iova_base = iova_range[0];
2742 gpi_dev->iova_size = iova_range[1];
2743 }
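	/*
	 * Hypothetical DT sketch for the properties parsed above (property
	 * names come from the calls above, values are made up):
	 *
	 *	gpi_dma0: qcom,gpi-dma@800000 {
	 *		compatible = "qcom,gpi-dma";
	 *		reg = <0x800000 0x60000>;
	 *		reg-names = "gpi-top";
	 *		qcom,max-num-gpii = <13>;
	 *		qcom,gpii-mask = <0xfa>;
	 *		qcom,ev-factor = <2>;
	 *		qcom,smmu-cfg = <0x1>;
	 *		qcom,iova-range = <0x0 0x100000 0x0 0x100000>;
	 *	};
	 */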
2744
2745 ret = gpi_smmu_init(gpi_dev);
2746 if (ret) {
2747 GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
2748 return ret;
2749 }
2750
2751 gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
2752 sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
2753 GFP_KERNEL);
2754 if (!gpi_dev->gpiis)
2755 return -ENOMEM;
2756
2757
2758 /* setup all the supported gpii */
2759 INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2760 for (i = 0; i < gpi_dev->max_gpii; i++) {
2761 struct gpii *gpii = &gpi_dev->gpiis[i];
2762 int chan;
2763
2764 if (!((1 << i) & gpi_dev->gpii_mask))
2765 continue;
2766
2767 /* set up ev cntxt register map */
2768 gpii->ev_cntxt_base_reg = gpi_dev->regs +
2769 GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2770 gpii->ev_cntxt_db_reg = gpi_dev->regs +
2771 GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2772 gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
2773 CNTXT_2_RING_BASE_LSB;
2774 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
2775 CNTXT_4_RING_RP_LSB;
2776 gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
2777 CNTXT_6_RING_WP_LSB;
2778 gpii->ev_cmd_reg = gpi_dev->regs +
2779 GPI_GPII_n_EV_CH_CMD_OFFS(i);
2780 gpii->ieob_src_reg = gpi_dev->regs +
2781 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
2782 gpii->ieob_clr_reg = gpi_dev->regs +
2783 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2784
2785 /* set up irq */
2786 ret = platform_get_irq(pdev, i);
2787 if (ret < 0) {
2788 GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d",
2789 i, ret);
2790 return ret;
2791 }
2792 gpii->irq = ret;
2793
2794 /* set up channel specific register info */
2795 for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2796 struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
2797
2798 /* set up ch cntxt register map */
2799 gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
2800 GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2801 gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
2802 GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2803 gpii_chan->ch_ring_base_lsb_reg =
2804 gpii_chan->ch_cntxt_base_reg +
2805 CNTXT_2_RING_BASE_LSB;
2806 gpii_chan->ch_ring_rp_lsb_reg =
2807 gpii_chan->ch_cntxt_base_reg +
2808 CNTXT_4_RING_RP_LSB;
2809 gpii_chan->ch_ring_wp_lsb_reg =
2810 gpii_chan->ch_cntxt_base_reg +
2811 CNTXT_6_RING_WP_LSB;
2812 gpii_chan->ch_cmd_reg = gpi_dev->regs +
2813 GPI_GPII_n_CH_CMD_OFFS(i);
2814
2815 /* vchan setup */
2816 vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
2817 gpii_chan->vc.desc_free = gpi_desc_free;
2818 gpii_chan->chid = chan;
2819 gpii_chan->gpii = gpii;
2820 gpii_chan->dir = GPII_CHAN_DIR[chan];
2821 }
2822 mutex_init(&gpii->ctrl_lock);
2823 rwlock_init(&gpii->pm_lock);
2824 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2825 (unsigned long)gpii);
2826 init_completion(&gpii->cmd_completion);
2827 gpii->gpii_id = i;
2828 gpii->regs = gpi_dev->regs;
2829 gpii->gpi_dev = gpi_dev;
2830 atomic_set(&gpii->dbg_index, 0);
2831 }
2832
2833 platform_set_drvdata(pdev, gpi_dev);
2834
2835 /* clear and set capabilities */
2836 dma_cap_zero(gpi_dev->dma_device.cap_mask);
2837 dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2838
2839 /* configure dmaengine apis */
2840 gpi_dev->dma_device.directions =
2841 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2842 gpi_dev->dma_device.residue_granularity =
2843 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2844 gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2845 gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2846 gpi_dev->dma_device.device_alloc_chan_resources =
2847 gpi_alloc_chan_resources;
2848 gpi_dev->dma_device.device_free_chan_resources =
2849 gpi_free_chan_resources;
2850 gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2851 gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2852 gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2853 gpi_dev->dma_device.device_config = gpi_config;
2854 gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2855 gpi_dev->dma_device.dev = gpi_dev->dev;
2856 gpi_dev->dma_device.device_pause = gpi_pause;
2857 gpi_dev->dma_device.device_resume = gpi_resume;
2858
2859 /* register with dmaengine framework */
2860 ret = dma_async_device_register(&gpi_dev->dma_device);
2861 if (ret) {
2862 GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
2863 return ret;
2864 }
2865
2866 ret = of_dma_controller_register(gpi_dev->dev->of_node,
2867 gpi_of_dma_xlate, gpi_dev);
2868 if (ret) {
2869 GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
2870 return ret;
2871 }
2872
2873 /* setup debug capabilities */
2874 gpi_setup_debug(gpi_dev);
2875 GPI_LOG(gpi_dev, "probe success\n");
2876
2877 return ret;
2878}
2879
2880static const struct of_device_id gpi_of_match[] = {
2881 { .compatible = "qcom,gpi-dma" },
2882 {}
2883};
2884MODULE_DEVICE_TABLE(of, gpi_of_match);
2885
2886static struct platform_driver gpi_driver = {
2887 .probe = gpi_probe,
2888 .driver = {
2889 .name = GPI_DMA_DRV_NAME,
2890 .of_match_table = gpi_of_match,
2891 },
2892};
2893
2894static int __init gpi_init(void)
2895{
2896 pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
2897 return platform_driver_register(&gpi_driver);
2898}
2899module_init(gpi_init)
2900
2901MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2902MODULE_LICENSE("GPL v2");