1/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <asm/dma-iommu.h>
14#include <linux/atomic.h>
15#include <linux/completion.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/io.h>
21#include <linux/iommu.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/ipc_logging.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_address.h>
29#include <linux/of_dma.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/scatterlist.h>
33#include <linux/sched_clock.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <asm/cacheflush.h>
37#include <linux/msm_gpi.h>
38#include "../dmaengine.h"
39#include "../virt-dma.h"
40#include "msm_gpi_mmio.h"
41
42/* global logging macros */
43#define GPI_LOG(gpi_dev, fmt, ...) do { \
44 if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
45 dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
46 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
47 ipc_log_string(gpi_dev->ilctxt, \
48 "%s: " fmt, __func__, ##__VA_ARGS__); \
49 } while (0)
50#define GPI_ERR(gpi_dev, fmt, ...) do { \
51 if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
52 dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
53 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
54 ipc_log_string(gpi_dev->ilctxt, \
55 "%s: " fmt, __func__, ##__VA_ARGS__); \
56 } while (0)
57
58/* gpii specific logging macros */
59#define GPII_INFO(gpii, ch, fmt, ...) do { \
60 if (gpii->klog_lvl >= LOG_LVL_INFO) \
61 pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
62 __func__, ##__VA_ARGS__); \
63 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
64 ipc_log_string(gpii->ilctxt, \
65 "ch:%u %s: " fmt, ch, \
66 __func__, ##__VA_ARGS__); \
67 } while (0)
68#define GPII_ERR(gpii, ch, fmt, ...) do { \
69 if (gpii->klog_lvl >= LOG_LVL_ERROR) \
70 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
71 __func__, ##__VA_ARGS__); \
72 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
73 ipc_log_string(gpii->ilctxt, \
74 "ch:%u %s: " fmt, ch, \
75 __func__, ##__VA_ARGS__); \
76 } while (0)
77#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
78 if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
79 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
80 __func__, ##__VA_ARGS__); \
81 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
82 ipc_log_string(gpii->ilctxt, \
83 "ch:%u %s: " fmt, ch, \
84 __func__, ##__VA_ARGS__); \
85 } while (0)
86
87enum DEBUG_LOG_LVL {
88 LOG_LVL_MASK_ALL,
89 LOG_LVL_CRITICAL,
90 LOG_LVL_ERROR,
91 LOG_LVL_INFO,
92 LOG_LVL_VERBOSE,
93 LOG_LVL_REG_ACCESS,
94};
95
96enum EV_PRIORITY {
97 EV_PRIORITY_ISR,
98 EV_PRIORITY_TASKLET,
99};
100
101#define GPI_DMA_DRV_NAME "gpi_dma"
102#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
103#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
104#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
105#define IPC_LOG_PAGES (40)
106#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
107#define CMD_TIMEOUT_MS (1000)
108#define GPII_REG(gpii, ch, fmt, ...) do { \
109 if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
110 pr_info("%s:%u:%s: " fmt, gpii->label, \
111 ch, __func__, ##__VA_ARGS__); \
112 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
113 ipc_log_string(gpii->ilctxt, \
114 "ch:%u %s: " fmt, ch, \
115 __func__, ##__VA_ARGS__); \
116 } while (0)
117#define GPII_VERB(gpii, ch, fmt, ...) do { \
118 if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
119 pr_info("%s:%u:%s: " fmt, gpii->label, \
120 ch, __func__, ##__VA_ARGS__); \
121 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
122 ipc_log_string(gpii->ilctxt, \
123 "ch:%u %s: " fmt, ch, \
124 __func__, ##__VA_ARGS__); \
125 } while (0)
126
127#else
128#define IPC_LOG_PAGES (2)
129#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
130#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
131#define CMD_TIMEOUT_MS (250)
132/* verbose and register logging are disabled if !debug */
133#define GPII_REG(gpii, ch, fmt, ...)
134#define GPII_VERB(gpii, ch, fmt, ...)
135#endif
136
137#define GPI_LABEL_SIZE (256)
138#define GPI_DBG_COMMON (99)
139#define MAX_CHANNELS_PER_GPII (2)
140#define GPI_TX_CHAN (0)
141#define GPI_RX_CHAN (1)
142#define STATE_IGNORE (U32_MAX)
143#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */
144
145struct __packed gpi_error_log_entry {
146 u32 routine : 4;
147 u32 type : 4;
148 u32 reserved0 : 4;
149 u32 code : 4;
150 u32 reserved1 : 3;
151 u32 chid : 5;
152 u32 reserved2 : 1;
153 u32 chtype : 1;
154 u32 ee : 1;
155};
156
157struct __packed xfer_compl_event {
158 u64 ptr;
159 u32 length : 24;
160 u8 code;
161 u16 status;
162 u8 type;
163 u8 chid;
164};
165
166struct __packed immediate_data_event {
167 u8 data_bytes[8];
168 u8 length : 4;
169 u8 resvd : 4;
170 u16 tre_index;
171 u8 code;
172 u16 status;
173 u8 type;
174 u8 chid;
175};
176
177struct __packed qup_notif_event {
178 u32 status;
179 u32 time;
180 u32 count :24;
181 u8 resvd;
182 u16 resvd1;
183 u8 type;
184 u8 chid;
185};
186
187struct __packed gpi_ere {
188 u32 dword[4];
189};
190
191enum GPI_EV_TYPE {
192 XFER_COMPLETE_EV_TYPE = 0x22,
193 IMMEDIATE_DATA_EV_TYPE = 0x30,
194 QUP_NOTIF_EV_TYPE = 0x31,
195 STALE_EV_TYPE = 0xFF,
196};
197
198union __packed gpi_event {
199 struct __packed xfer_compl_event xfer_compl_event;
200 struct __packed immediate_data_event immediate_data_event;
201 struct __packed qup_notif_event qup_notif_event;
202 struct __packed gpi_ere gpi_ere;
203};
204
205enum gpii_irq_settings {
206 DEFAULT_IRQ_SETTINGS,
207 MASK_IEOB_SETTINGS,
208};
209
210enum gpi_ev_state {
211 DEFAULT_EV_CH_STATE = 0,
212 EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
213 EV_STATE_ALLOCATED,
214 MAX_EV_STATES
215};
216
217static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
218 [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
219 [EV_STATE_ALLOCATED] = "ALLOCATED",
220};
221
222#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
223 "INVALID" : gpi_ev_state_str[state])
224
225enum gpi_ch_state {
226 DEFAULT_CH_STATE = 0x0,
227 CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
228 CH_STATE_ALLOCATED = 0x1,
229 CH_STATE_STARTED = 0x2,
230 CH_STATE_STOPPED = 0x3,
231 CH_STATE_STOP_IN_PROC = 0x4,
232 CH_STATE_ERROR = 0xf,
233 MAX_CH_STATES
234};
235
236static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
237 [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
238 [CH_STATE_ALLOCATED] = "ALLOCATED",
239 [CH_STATE_STARTED] = "STARTED",
240 [CH_STATE_STOPPED] = "STOPPED",
241 [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
242 [CH_STATE_ERROR] = "ERROR",
243};
244
245#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
246 "INVALID" : gpi_ch_state_str[state])
247
248enum gpi_cmd {
249 GPI_CH_CMD_BEGIN,
250 GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
251 GPI_CH_CMD_START,
252 GPI_CH_CMD_STOP,
253 GPI_CH_CMD_RESET,
254 GPI_CH_CMD_DE_ALLOC,
255 GPI_CH_CMD_UART_SW_STALE,
256 GPI_CH_CMD_UART_RFR_READY,
257 GPI_CH_CMD_UART_RFR_NOT_READY,
258 GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
259 GPI_EV_CMD_BEGIN,
260 GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
261 GPI_EV_CMD_RESET,
262 GPI_EV_CMD_DEALLOC,
263 GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
264 GPI_MAX_CMD,
265};
266
267#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
268
269static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
270 [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
271 [GPI_CH_CMD_START] = "CH START",
272 [GPI_CH_CMD_STOP] = "CH STOP",
273 [GPI_CH_CMD_RESET] = "CH_RESET",
274 [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
275 [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
276 [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
277 [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
278 [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
279 [GPI_EV_CMD_RESET] = "EV RESET",
280 [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
281};
282
283#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
284 gpi_cmd_str[cmd])
285
286static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
287 [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
288 [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
289 [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
290 [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
291 [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
292 [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
293};
294
295#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
296 "INVALID" : gpi_cb_event_str[event])
297
298enum se_protocol {
299 SE_PROTOCOL_SPI = 1,
300 SE_PROTOCOL_UART = 2,
301 SE_PROTOCOL_I2C = 3,
302 SE_MAX_PROTOCOL
303};
304
305/*
306 * @DISABLE_STATE: no register access allowed
307 * @CONFIG_STATE: client has configured the channel
308 * @PREPARE_HARDWARE: register access is allowed,
309 *                    however events are not processed
310 * @ACTIVE_STATE: channels are fully operational
311 * @PREPARE_TERMINATE: graceful termination of channels,
312 *                     register access is allowed
313 * @PAUSE_STATE: channels are active, but not processing any events
314 */
315enum gpi_pm_state {
316 DISABLE_STATE,
317 CONFIG_STATE,
318 PREPARE_HARDWARE,
319 ACTIVE_STATE,
320 PREPARE_TERMINATE,
321 PAUSE_STATE,
322 MAX_PM_STATE
323};
324
325#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
326
327static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
328 [DISABLE_STATE] = "DISABLE",
329 [CONFIG_STATE] = "CONFIG",
330 [PREPARE_HARDWARE] = "PREPARE HARDWARE",
331 [ACTIVE_STATE] = "ACTIVE",
332 [PREPARE_TERMINATE] = "PREPARE TERMINATE",
333 [PAUSE_STATE] = "PAUSE",
334};
335
336#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
337 "INVALID" : gpi_pm_state_str[state])
338
339static const struct {
340 enum gpi_cmd gpi_cmd;
341 u32 opcode;
342 u32 state;
343 u32 timeout_ms;
344} gpi_cmd_info[GPI_MAX_CMD] = {
345 {
346 GPI_CH_CMD_ALLOCATE,
347 GPI_GPII_n_CH_CMD_ALLOCATE,
348 CH_STATE_ALLOCATED,
349 CMD_TIMEOUT_MS,
350 },
351 {
352 GPI_CH_CMD_START,
353 GPI_GPII_n_CH_CMD_START,
354 CH_STATE_STARTED,
355 CMD_TIMEOUT_MS,
356 },
357 {
358 GPI_CH_CMD_STOP,
359 GPI_GPII_n_CH_CMD_STOP,
360 CH_STATE_STOPPED,
361 CMD_TIMEOUT_MS,
362 },
363 {
364 GPI_CH_CMD_RESET,
365 GPI_GPII_n_CH_CMD_RESET,
366 CH_STATE_ALLOCATED,
367 CMD_TIMEOUT_MS,
368 },
369 {
370 GPI_CH_CMD_DE_ALLOC,
371 GPI_GPII_n_CH_CMD_DE_ALLOC,
372 CH_STATE_NOT_ALLOCATED,
373 CMD_TIMEOUT_MS,
374 },
375 {
376 GPI_CH_CMD_UART_SW_STALE,
377 GPI_GPII_n_CH_CMD_UART_SW_STALE,
378 STATE_IGNORE,
379 CMD_TIMEOUT_MS,
380 },
381 {
382 GPI_CH_CMD_UART_RFR_READY,
383 GPI_GPII_n_CH_CMD_UART_RFR_READY,
384 STATE_IGNORE,
385 CMD_TIMEOUT_MS,
386 },
387 {
388 GPI_CH_CMD_UART_RFR_NOT_READY,
389 GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
390 STATE_IGNORE,
391 CMD_TIMEOUT_MS,
392 },
393 {
394 GPI_EV_CMD_ALLOCATE,
395 GPI_GPII_n_EV_CH_CMD_ALLOCATE,
396 EV_STATE_ALLOCATED,
397 CMD_TIMEOUT_MS,
398 },
399 {
400 GPI_EV_CMD_RESET,
401 GPI_GPII_n_EV_CH_CMD_RESET,
402 EV_STATE_ALLOCATED,
403 CMD_TIMEOUT_MS,
404 },
405 {
406 GPI_EV_CMD_DEALLOC,
407 GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
408 EV_STATE_NOT_ALLOCATED,
409 CMD_TIMEOUT_MS,
410 },
411};
412
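/*
 * Ring bookkeeping shared by transfer (TRE) and event rings. base and
 * phys_addr point to the length-aligned region carved out of the oversized
 * pre_aligned allocation made by gpi_alloc_ring().
 */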
413struct gpi_ring {
414 void *pre_aligned;
415 size_t alloc_size;
416 phys_addr_t phys_addr;
417 dma_addr_t dma_handle;
418 void *base;
419 void *wp;
420 void *rp;
421 u32 len;
422 u32 el_size;
423 u32 elements;
424 bool configured;
425};
426
427struct sg_tre {
428 void *ptr;
429 void *wp; /* store chan wp for debugging */
430};
431
432struct gpi_dbg_log {
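/* one entry of the per-gpii register access debug log */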
433 void *addr;
434 u64 time;
435 u32 val;
436 bool read;
437};
438
439struct gpi_dev {
440 struct dma_device dma_device;
441 struct device *dev;
442 struct resource *res;
443 void __iomem *regs;
444 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
445 u32 gpii_mask; /* gpii instances available for apps */
446 u32 ev_factor; /* ev ring length factor */
447 u32 smmu_cfg;
448 dma_addr_t iova_base;
449 size_t iova_size;
450 struct gpii *gpiis;
451 void *ilctxt;
452 u32 ipc_log_lvl;
453 u32 klog_lvl;
454 struct dentry *dentry;
455};
456
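/* per-channel state; each gpii instance exposes one TX and one RX channel */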
457struct gpii_chan {
458 struct virt_dma_chan vc;
459 u32 chid;
460 u32 seid;
461 enum se_protocol protocol;
462 enum EV_PRIORITY priority; /* comes from clients DT node */
463 struct gpii *gpii;
464 enum gpi_ch_state ch_state;
465 enum gpi_pm_state pm_state;
466 void __iomem *ch_cntxt_base_reg;
467 void __iomem *ch_cntxt_db_reg;
468 void __iomem *ch_ring_base_lsb_reg,
469 *ch_ring_rp_lsb_reg,
470 *ch_ring_wp_lsb_reg;
471 void __iomem *ch_cmd_reg;
472 u32 req_tres; /* # of tre's client requested */
473 u32 dir;
474 struct gpi_ring ch_ring;
475 struct gpi_client_info client_info;
476};
477
478struct gpii {
479 u32 gpii_id;
480 struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
481 struct gpi_dev *gpi_dev;
482 enum EV_PRIORITY ev_priority;
483 enum se_protocol protocol;
484 int irq;
485 void __iomem *regs; /* points to gpi top */
486 void __iomem *ev_cntxt_base_reg;
487 void __iomem *ev_cntxt_db_reg;
488 void __iomem *ev_ring_base_lsb_reg,
489 *ev_ring_rp_lsb_reg,
490 *ev_ring_wp_lsb_reg;
491 void __iomem *ev_cmd_reg;
492 void __iomem *ieob_src_reg;
493 void __iomem *ieob_clr_reg;
494 struct mutex ctrl_lock;
495 enum gpi_ev_state ev_state;
496 bool configured_irq;
497 enum gpi_pm_state pm_state;
498 rwlock_t pm_lock;
499 struct gpi_ring ev_ring;
500 struct tasklet_struct ev_task; /* event processing tasklet */
501 struct completion cmd_completion;
502 enum gpi_cmd gpi_cmd;
503 u32 cntxt_type_irq_msk;
504 void *ilctxt;
505 u32 ipc_log_lvl;
506 u32 klog_lvl;
507 struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
508 atomic_t dbg_index;
509 char label[GPI_LABEL_SIZE];
510 struct dentry *dentry;
511};
512
513struct gpi_desc {
514 struct virt_dma_desc vd;
515 void *wp; /* points to TRE last queued during issue_pending */
516 void *db; /* DB register to program */
517 struct gpii_chan *gpii_chan;
518};
519
520#define GPI_SMMU_ATTACH BIT(0)
521#define GPI_SMMU_S1_BYPASS BIT(1)
522#define GPI_SMMU_FAST BIT(2)
523#define GPI_SMMU_ATOMIC BIT(3)
524
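/* hardware direction per channel: GPI_TX_CHAN is outbound, GPI_RX_CHAN is inbound */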
525const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
526 GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
527};
528
529struct dentry *pdentry;
530static irqreturn_t gpi_handle_irq(int irq, void *data);
531static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
532static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
533static void gpi_process_events(struct gpii *gpii);
534
535static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
536{
537 return container_of(dma_chan, struct gpii_chan, vc.chan);
538}
539
540static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
541{
542 return container_of(vd, struct gpi_desc, vd);
543}
544
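/* translate a ring element address between CPU virtual and physical views */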
545static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
546 void *addr)
547{
548 return ring->phys_addr + (addr - ring->base);
549}
550
551static inline void *to_virtual(const struct gpi_ring *const ring,
552 phys_addr_t addr)
553{
554 return ring->base + (addr - ring->phys_addr);
555}
556
557#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
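/* debug builds record every register access in the gpii dbg_log ring and trace it via GPII_REG */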
558static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
559{
560 u64 time = sched_clock();
561 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
562 u32 val;
563
564 val = readl_relaxed(addr);
565 index &= (GPI_DBG_LOG_SIZE - 1);
566 (gpii->dbg_log + index)->addr = addr;
567 (gpii->dbg_log + index)->time = time;
568 (gpii->dbg_log + index)->val = val;
569 (gpii->dbg_log + index)->read = true;
570 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
571 addr - gpii->regs, val);
572 return val;
573}
574static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
575{
576 u64 time = sched_clock();
577 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
578
579 index &= (GPI_DBG_LOG_SIZE - 1);
580 (gpii->dbg_log + index)->addr = addr;
581 (gpii->dbg_log + index)->time = time;
582 (gpii->dbg_log + index)->val = val;
583 (gpii->dbg_log + index)->read = false;
584
585 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
586 addr - gpii->regs, val);
587 writel_relaxed(val, addr);
588}
589#else
590static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
591{
592 u32 val = readl_relaxed(addr);
593
594 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
595 addr - gpii->regs, val);
596 return val;
597}
598static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
599{
600 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
601 addr - gpii->regs, val);
602 writel_relaxed(val, addr);
603}
604#endif
605
606/* gpi_write_reg_field - write to specific bit field */
607static inline void gpi_write_reg_field(struct gpii *gpii,
608 void __iomem *addr,
609 u32 mask,
610 u32 shift,
611 u32 val)
612{
613 u32 tmp = gpi_read_reg(gpii, addr);
614
615 tmp &= ~mask;
616 val = tmp | ((val << shift) & mask);
617 gpi_write_reg(gpii, addr, val);
618}
619
620static void gpi_disable_interrupts(struct gpii *gpii)
621{
622 struct {
623 u32 offset;
624 u32 mask;
625 u32 shift;
626 u32 val;
627 } default_reg[] = {
628 {
629 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
630 (gpii->gpii_id),
631 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
632 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
633 0,
634 },
635 {
636 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
637 (gpii->gpii_id),
638 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
639 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
640 0,
641 },
642 {
643 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
644 (gpii->gpii_id),
645 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
646 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
647 0,
648 },
649 {
650 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
651 (gpii->gpii_id),
652 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
653 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
654 0,
655 },
656 {
657 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
658 (gpii->gpii_id),
659 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
660 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
661 0,
662 },
663 {
664 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
665 (gpii->gpii_id),
666 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
667 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
668 0,
669 },
670 {
671 GPI_GPII_n_CNTXT_INTSET_OFFS
672 (gpii->gpii_id),
673 GPI_GPII_n_CNTXT_INTSET_BMSK,
674 GPI_GPII_n_CNTXT_INTSET_SHFT,
675 0,
676 },
677 { 0 },
678 };
679 int i;
680
681 for (i = 0; default_reg[i].offset; i++)
682 gpi_write_reg_field(gpii, gpii->regs +
683 default_reg[i].offset,
684 default_reg[i].mask,
685 default_reg[i].shift,
686 default_reg[i].val);
687 gpii->cntxt_type_irq_msk = 0;
688 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
689 gpii->configured_irq = false;
690}
691
692/* configure and enable interrupts */
693static int gpi_config_interrupts(struct gpii *gpii,
694 enum gpii_irq_settings settings,
695 bool mask)
696{
697 int ret;
698 int i;
699 const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
700 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
701 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
702 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
703 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
704 struct {
705 u32 offset;
706 u32 mask;
707 u32 shift;
708 u32 val;
709 } default_reg[] = {
710 {
711 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
712 (gpii->gpii_id),
713 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
714 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
715 def_type,
716 },
717 {
718 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
719 (gpii->gpii_id),
720 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
721 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
722 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
723 },
724 {
725 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
726 (gpii->gpii_id),
727 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
728 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
729 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
730 },
731 {
732 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
733 (gpii->gpii_id),
734 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
735 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
736 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
737 },
738 {
739 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
740 (gpii->gpii_id),
741 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
742 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
743 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
744 },
745 {
746 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
747 (gpii->gpii_id),
748 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
749 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
750 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
751 },
752 {
753 GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
754 (gpii->gpii_id),
755 U32_MAX,
756 0,
757 0x0,
758 },
759 {
760 GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
761 (gpii->gpii_id),
762 U32_MAX,
763 0,
764 0x0,
765 },
766 {
767 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
768 (gpii->gpii_id),
769 U32_MAX,
770 0,
771 0x0,
772 },
773 {
774 GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
775 (gpii->gpii_id),
776 U32_MAX,
777 0,
778 0x0,
779 },
780 {
781 GPI_GPII_n_CNTXT_INTSET_OFFS
782 (gpii->gpii_id),
783 GPI_GPII_n_CNTXT_INTSET_BMSK,
784 GPI_GPII_n_CNTXT_INTSET_SHFT,
785 0x01,
786 },
787 {
788 GPI_GPII_n_ERROR_LOG_OFFS
789 (gpii->gpii_id),
790 U32_MAX,
791 0,
792 0x00,
793 },
794 { 0 },
795 };
796
797 GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
798 (gpii->configured_irq) ? 'F' : 'T',
799 (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
800 (mask) ? 'T' : 'F');
801
802 if (!gpii->configured_irq) {
803 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
804 gpi_handle_irq, IRQF_TRIGGER_HIGH,
805 gpii->label, gpii);
806 if (ret < 0) {
807 GPII_CRITIC(gpii, GPI_DBG_COMMON,
808 "error request irq:%d ret:%d\n",
809 gpii->irq, ret);
810 return ret;
811 }
812 }
813
814 if (settings == MASK_IEOB_SETTINGS) {
815 /*
816 * GPII only uses one EV ring per gpii so we can globally
817 * enable/disable IEOB interrupt
818 */
819 if (mask)
820 gpii->cntxt_type_irq_msk |=
821 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
822 else
823 gpii->cntxt_type_irq_msk &=
824 ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
825 gpi_write_reg_field(gpii, gpii->regs +
826 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
827 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
828 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
829 gpii->cntxt_type_irq_msk);
830 } else {
831 for (i = 0; default_reg[i].offset; i++)
832 gpi_write_reg_field(gpii, gpii->regs +
833 default_reg[i].offset,
834 default_reg[i].mask,
835 default_reg[i].shift,
836 default_reg[i].val);
837 gpii->cntxt_type_irq_msk = def_type;
838 }
839
840 gpii->configured_irq = true;
841
842 return 0;
843}
844
845/* Sends gpii event or channel command */
846static int gpi_send_cmd(struct gpii *gpii,
847 struct gpii_chan *gpii_chan,
848 enum gpi_cmd gpi_cmd)
849{
850 u32 chid = MAX_CHANNELS_PER_GPII;
851 u32 cmd;
852 unsigned long timeout;
853 void __iomem *cmd_reg;
854
855 if (gpi_cmd >= GPI_MAX_CMD)
856 return -EINVAL;
857 if (IS_CHAN_CMD(gpi_cmd))
858 chid = gpii_chan->chid;
859
860 GPII_INFO(gpii, chid,
861 "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
862
863 /* send opcode and wait for completion */
864 reinit_completion(&gpii->cmd_completion);
865 gpii->gpi_cmd = gpi_cmd;
866
867 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
868 gpii->ev_cmd_reg;
869 cmd = IS_CHAN_CMD(gpi_cmd) ?
870 GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
871 GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
872 gpi_write_reg(gpii, cmd_reg, cmd);
873 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
874 msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
875
876 if (!timeout) {
877 GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
878 TO_GPI_CMD_STR(gpi_cmd));
879 return -EIO;
880 }
881
882 /* confirm new ch state is correct , if the cmd is a state change cmd */
883 if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
884 return 0;
885 if (IS_CHAN_CMD(gpi_cmd) &&
886 gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
887 return 0;
888 if (!IS_CHAN_CMD(gpi_cmd) &&
889 gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
890 return 0;
891
892 return -EIO;
893}
894
895/* program transfer ring DB register */
896static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
897 struct gpi_ring *ring,
898 void *wp)
899{
900 struct gpii *gpii = gpii_chan->gpii;
901 phys_addr_t p_wp;
902
903 p_wp = to_physical(ring, wp);
904 gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
905}
906
907/* program event ring DB register */
908static inline void gpi_write_ev_db(struct gpii *gpii,
909 struct gpi_ring *ring,
910 void *wp)
911{
912 phys_addr_t p_wp;
913
914 p_wp = ring->phys_addr + (wp - ring->base);
915 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
916}
917
918/* notify client with generic event */
919static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
920 enum msm_gpi_cb_event event,
921 u64 status)
922{
923 struct gpii *gpii = gpii_chan->gpii;
924 struct gpi_client_info *client_info = &gpii_chan->client_info;
925 struct msm_gpi_cb msm_gpi_cb = {0};
926
927 GPII_ERR(gpii, gpii_chan->chid,
928 "notifying event:%s with status:%llu\n",
929 TO_GPI_CB_EVENT_STR(event), status);
930
931 msm_gpi_cb.cb_event = event;
932 msm_gpi_cb.status = status;
933 msm_gpi_cb.timestamp = sched_clock();
934 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
935 client_info->cb_param);
936}
937
938/* process transfer completion interrupt */
939static void gpi_process_ieob(struct gpii *gpii)
940{
941
942 gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
943
944 /* process events based on priority */
945 if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
946 GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
947 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
948 tasklet_schedule(&gpii->ev_task);
949 } else {
950 GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
951 gpi_process_events(gpii);
952 }
953}
954
955/* process channel control interrupt */
956static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
957{
958 u32 gpii_id = gpii->gpii_id;
959 u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
960 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
961 u32 chid;
962 struct gpii_chan *gpii_chan;
963 u32 state;
964
965 /* clear the status */
966 offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
967 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
968
969 for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
970 if (!(BIT(chid) & ch_irq))
971 continue;
972
973 gpii_chan = &gpii->gpii_chan[chid];
974 GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
975 state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
976 CNTXT_0_CONFIG);
977 state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
978 GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
979
980 /*
981 * The CH_CMD_DE_ALLOC cmd is always successful. However, the cmd
982 * does not change the hardware channel state, so overwrite the
983 * software state with the default state.
984 */
985 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
986 state = DEFAULT_CH_STATE;
987 gpii_chan->ch_state = state;
988 GPII_VERB(gpii, chid, "setting channel to state:%s\n",
989 TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
990
991 /*
992 * Triggering complete all if ch_state is not a stop in process.
993 * Stop in process is a transition state and we will wait for
994 * stop interrupt before notifying.
995 */
996 if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
997 complete_all(&gpii->cmd_completion);
998
999 /* notifying clients if in error state */
1000 if (gpii_chan->ch_state == CH_STATE_ERROR)
1001 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
1002 __LINE__);
1003 }
1004}
1005
1006/* processing gpi level error interrupts */
1007static void gpi_process_glob_err_irq(struct gpii *gpii)
1008{
1009 u32 gpii_id = gpii->gpii_id;
1010 u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
1011 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
1012 u32 error_log;
1013 u32 chid;
1014 struct gpii_chan *gpii_chan;
1015 struct gpi_client_info *client_info;
1016 struct msm_gpi_cb msm_gpi_cb;
1017 struct gpi_error_log_entry *log_entry =
1018 (struct gpi_error_log_entry *)&error_log;
1019
1020 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
1021 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
1022
1023 /* only error interrupt should be set */
1024 if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
1025 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
1026 irq_stts);
1027 goto error_irq;
1028 }
1029
1030 offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
1031 error_log = gpi_read_reg(gpii, gpii->regs + offset);
1032 gpi_write_reg(gpii, gpii->regs + offset, 0);
1033
1034 /* get channel info */
1035 chid = ((struct gpi_error_log_entry *)&error_log)->chid;
1036 if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
1037 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
1038 chid);
1039 goto error_irq;
1040 }
1041
1042 gpii_chan = &gpii->gpii_chan[chid];
1043 client_info = &gpii_chan->client_info;
1044
1045 /* notify client with error log */
1046 msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
1047 msm_gpi_cb.error_log.routine = log_entry->routine;
1048 msm_gpi_cb.error_log.type = log_entry->type;
1049 msm_gpi_cb.error_log.error_code = log_entry->code;
1050 GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
1051 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1052 GPII_ERR(gpii, gpii_chan->chid,
1053 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
1054 log_entry->ee, log_entry->chtype,
1055 msm_gpi_cb.error_log.routine,
1056 msm_gpi_cb.error_log.type,
1057 msm_gpi_cb.error_log.error_code);
1058 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1059 client_info->cb_param);
1060
1061 return;
1062
1063error_irq:
1064 for (chid = 0, gpii_chan = gpii->gpii_chan;
1065 chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
1066 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
1067 irq_stts);
1068}
1069
1070/* gpii interrupt handler */
1071static irqreturn_t gpi_handle_irq(int irq, void *data)
1072{
1073 struct gpii *gpii = data;
1074 u32 type;
1075 unsigned long flags;
1076 u32 offset;
1077 u32 gpii_id = gpii->gpii_id;
1078
1079 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1080
1081 read_lock_irqsave(&gpii->pm_lock, flags);
1082
1083 /*
1084 * States are out of sync: an interrupt was received while the
1085 * software state does not allow register access, so bail out.
1086 */
1087 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1088 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1089 "receive interrupt while in %s state\n",
1090 TO_GPI_PM_STR(gpii->pm_state));
1091 goto exit_irq;
1092 }
1093
1094 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1095 type = gpi_read_reg(gpii, gpii->regs + offset);
1096
1097 do {
1098 GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
1099 type);
1100 /* global gpii error */
1101 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
1102 GPII_ERR(gpii, GPI_DBG_COMMON,
1103 "processing global error irq\n");
1104 gpi_process_glob_err_irq(gpii);
1105 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
1106 }
1107
1108 /* transfer complete interrupt */
1109 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
1110 GPII_VERB(gpii, GPI_DBG_COMMON,
1111 "process IEOB interrupts\n");
1112 gpi_process_ieob(gpii);
1113 type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
1114 }
1115
1116 /* event control irq */
1117 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
1118 u32 ev_state;
1119 u32 ev_ch_irq;
1120
1121 GPII_INFO(gpii, GPI_DBG_COMMON,
1122 "processing EV CTRL interrupt\n");
1123 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
1124 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
1125
1126 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
1127 (gpii_id);
1128 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
1129 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
1130 CNTXT_0_CONFIG);
1131 ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
1132 ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
1133
1134 /*
1135 * CMD EV_CMD_DEALLOC is always successful. However
1136 * cmd does not change hardware status. So overwriting
1137 * software state to default state.
1138 */
1139 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
1140 ev_state = DEFAULT_EV_CH_STATE;
1141
1142 gpii->ev_state = ev_state;
1143 GPII_INFO(gpii, GPI_DBG_COMMON,
1144 "setting EV state to %s\n",
1145 TO_GPI_EV_STATE_STR(gpii->ev_state));
1146 complete_all(&gpii->cmd_completion);
1147 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
1148 }
1149
1150 /* channel control irq */
1151 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
1152 GPII_INFO(gpii, GPI_DBG_COMMON,
1153 "process CH CTRL interrupts\n");
1154 gpi_process_ch_ctrl_irq(gpii);
1155 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
1156 }
1157
1158 if (type) {
1159 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1160 "Unhandled interrupt status:0x%x\n", type);
1161 goto exit_irq;
1162 }
1163 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1164 type = gpi_read_reg(gpii, gpii->regs + offset);
1165 } while (type);
1166
1167exit_irq:
1168 read_unlock_irqrestore(&gpii->pm_lock, flags);
1169 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1170
1171 return IRQ_HANDLED;
1172}
1173
1174/* process qup notification events */
1175static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
1176 struct qup_notif_event *notif_event)
1177{
1178 struct gpi_client_info *client_info = &gpii_chan->client_info;
1179 struct msm_gpi_cb msm_gpi_cb;
1180
1181 GPII_VERB(gpii_chan->gpii, gpii_chan->chid,
1182   "status:0x%x time:0x%x count:0x%x\n",
1183 notif_event->status, notif_event->time, notif_event->count);
1184
1185 msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
1186 msm_gpi_cb.status = notif_event->status;
1187 msm_gpi_cb.timestamp = notif_event->time;
1188 msm_gpi_cb.count = notif_event->count;
1189 GPII_VERB(gpii_chan->gpii, gpii_chan->chid, "sending CB event:%s\n",
1190   TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1191 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1192 client_info->cb_param);
1193}
1194
1195/* process DMA Immediate completion data events */
1196static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
1197 struct immediate_data_event *imed_event)
1198{
1199 struct gpii *gpii = gpii_chan->gpii;
1200 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1201 struct virt_dma_desc *vd;
1202 struct gpi_desc *gpi_desc;
1203 void *tre = ch_ring->base +
1204  (ch_ring->el_size * imed_event->tre_index);
1205 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1206 unsigned long flags;
1207
1208 /*
1209 * If channel not active don't process event but let
1210 * client know pending event is available
1211 */
1212 if (gpii_chan->pm_state != ACTIVE_STATE) {
1213 GPII_ERR(gpii, gpii_chan->chid,
1214 "skipping processing event because ch @ %s state\n",
1215 TO_GPI_PM_STR(gpii_chan->pm_state));
1216 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1217 __LINE__);
1218 return;
1219 }
1220
1221 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1222 vd = vchan_next_desc(&gpii_chan->vc);
1223 if (!vd) {
1224 struct gpi_ere *gpi_ere;
1225 struct msm_gpi_tre *gpi_tre;
1226
1227  spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1228  GPII_ERR(gpii, gpii_chan->chid,
1229 "event without a pending descriptor!\n");
1230 gpi_ere = (struct gpi_ere *)imed_event;
1231 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1232 gpi_ere->dword[0], gpi_ere->dword[1],
1233 gpi_ere->dword[2], gpi_ere->dword[3]);
1234 gpi_tre = tre;
1235 GPII_ERR(gpii, gpii_chan->chid,
1236 "Pending TRE: %08x %08x %08x %08x\n",
1237 gpi_tre->dword[0], gpi_tre->dword[1],
1238 gpi_tre->dword[2], gpi_tre->dword[3]);
1239 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1240 __LINE__);
1241 return;
1242 }
1243 gpi_desc = to_gpi_desc(vd);
1244
1245 /* the RP reported by the event does not match the descriptor's TRE */
1246 if (gpi_desc->wp != tre) {
1247  spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1248  GPII_ERR(gpii, gpii_chan->chid,
1249 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1250 to_physical(ch_ring, gpi_desc->wp),
1251 to_physical(ch_ring, tre));
1252 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1253 __LINE__);
1254 return;
1255 }
1256
1257 list_del(&vd->node);
1258 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1259
1260
1261 /*
1262 * RP pointed by Event is to last TRE processed,
1263 * we need to update ring rp to tre + 1
1264 */
1265 tre += ch_ring->el_size;
1266 if (tre >= (ch_ring->base + ch_ring->len))
1267 tre = ch_ring->base;
1268 ch_ring->rp = tre;
1269
1270 /* make sure rp updates are immediately visible to all cores */
1271 smp_wmb();
1272
1273 tx_cb_param = vd->tx.callback_param;
1274 if (vd->tx.callback && tx_cb_param) {
1275  struct msm_gpi_tre *imed_tre = &tx_cb_param->imed_tre;
1276
1277  GPII_VERB(gpii, gpii_chan->chid,
1278 "cb_length:%u compl_code:0x%x status:0x%x\n",
1279 imed_event->length, imed_event->code,
1280 imed_event->status);
1281  /* Update immediate data if any from event */
1282  *imed_tre = *((struct msm_gpi_tre *)imed_event);
1283  tx_cb_param->length = imed_event->length;
1284  tx_cb_param->completion_code = imed_event->code;
1285  tx_cb_param->status = imed_event->status;
1286  vd->tx.callback(tx_cb_param);
1287 }
1288 kfree(gpi_desc);
1289}
1290
1291/* processing transfer completion events */
1292static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
1293 struct xfer_compl_event *compl_event)
1294{
1295 struct gpii *gpii = gpii_chan->gpii;
1296 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1297 void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
1298 struct virt_dma_desc *vd;
1299 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1300 struct gpi_desc *gpi_desc;
1301 unsigned long flags;
1302
1303 /* only process events on active channel */
1304 if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
1305 GPII_ERR(gpii, gpii_chan->chid,
1306 "skipping processing event because ch @ %s state\n",
1307 TO_GPI_PM_STR(gpii_chan->pm_state));
1308 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1309 __LINE__);
1310 return;
1311 }
1312
1313 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1314 vd = vchan_next_desc(&gpii_chan->vc);
1315 if (!vd) {
1316 struct gpi_ere *gpi_ere;
1317
1318  spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1319  GPII_ERR(gpii, gpii_chan->chid,
1320 "Event without a pending descriptor!\n");
1321 gpi_ere = (struct gpi_ere *)compl_event;
1322 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1323 gpi_ere->dword[0], gpi_ere->dword[1],
1324 gpi_ere->dword[2], gpi_ere->dword[3]);
1325 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1326 __LINE__);
1327 return;
1328 }
1329
1330 gpi_desc = to_gpi_desc(vd);
1331
1332 /* TRE Event generated didn't match descriptor's TRE */
1333 if (gpi_desc->wp != ev_rp) {
1334  spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1335  GPII_ERR(gpii, gpii_chan->chid,
1336   "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1337 to_physical(ch_ring, gpi_desc->wp),
1338 to_physical(ch_ring, ev_rp));
1339 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1340 __LINE__);
1341 return;
1342 }
1343
1344 list_del(&vd->node);
1345 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1346
1347
1348 /*
1349 * RP pointed by Event is to last TRE processed,
1350 * we need to update ring rp to ev_rp + 1
1351 */
1352 ev_rp += ch_ring->el_size;
1353 if (ev_rp >= (ch_ring->base + ch_ring->len))
1354 ev_rp = ch_ring->base;
1355 ch_ring->rp = ev_rp;
1356
1357 /* update must be visible to other cores */
1358 smp_wmb();
1359
1360 tx_cb_param = vd->tx.callback_param;
1361 if (vd->tx.callback && tx_cb_param) {
1362  GPII_VERB(gpii, gpii_chan->chid,
1363 "cb_length:%u compl_code:0x%x status:0x%x\n",
1364 compl_event->length, compl_event->code,
1365 compl_event->status);
1366 tx_cb_param->length = compl_event->length;
1367 tx_cb_param->completion_code = compl_event->code;
1368 tx_cb_param->status = compl_event->status;
1369  vd->tx.callback(tx_cb_param);
1370 }
1371 kfree(gpi_desc);
1372}
1373
1374/* process all events */
1375static void gpi_process_events(struct gpii *gpii)
1376{
1377 struct gpi_ring *ev_ring = &gpii->ev_ring;
1378 phys_addr_t cntxt_rp, local_rp;
1379 void *rp;
1380 union gpi_event *gpi_event;
1381 struct gpii_chan *gpii_chan;
1382 u32 chid, type;
1383
1384 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1385 rp = to_virtual(ev_ring, cntxt_rp);
1386 local_rp = to_physical(ev_ring, ev_ring->rp);
1387
1388 GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp:%pa local_rp:%pa\n",
1389   &cntxt_rp, &local_rp);
1390
1391 do {
1392  while (rp != ev_ring->rp) {
1393   gpi_event = ev_ring->rp;
1394 chid = gpi_event->xfer_compl_event.chid;
1395 type = gpi_event->xfer_compl_event.type;
1396 GPII_VERB(gpii, GPI_DBG_COMMON,
1397     "chid:%u type:0x%x %08x %08x %08x %08x\n",
1398     chid, type,
1399     gpi_event->gpi_ere.dword[0],
1400 gpi_event->gpi_ere.dword[1],
1401 gpi_event->gpi_ere.dword[2],
1402 gpi_event->gpi_ere.dword[3]);
1403
1404 switch (type) {
1405 case XFER_COMPLETE_EV_TYPE:
1406 gpii_chan = &gpii->gpii_chan[chid];
1407 gpi_process_xfer_compl_event(gpii_chan,
1408 &gpi_event->xfer_compl_event);
1409 break;
1410 case STALE_EV_TYPE:
1411 GPII_VERB(gpii, GPI_DBG_COMMON,
1412 "stale event, not processing\n");
1413 break;
1414 case IMMEDIATE_DATA_EV_TYPE:
1415 gpii_chan = &gpii->gpii_chan[chid];
1416 gpi_process_imed_data_event(gpii_chan,
1417 &gpi_event->immediate_data_event);
1418 break;
1419 case QUP_NOTIF_EV_TYPE:
1420 gpii_chan = &gpii->gpii_chan[chid];
1421 gpi_process_qup_notif_event(gpii_chan,
1422 &gpi_event->qup_notif_event);
1423 break;
1424 default:
1425 GPII_VERB(gpii, GPI_DBG_COMMON,
1426 "not supported event type:0x%x\n",
1427 type);
1428 }
1429 gpi_ring_recycle_ev_element(ev_ring);
1430  }
1431 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1432
1433 /* clear pending IEOB events */
1434  gpi_write_reg(gpii, gpii->ieob_clr_reg, BIT(0));
1435
1436  cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1437  rp = to_virtual(ev_ring, cntxt_rp);
1438
1439 } while (rp != ev_ring->rp);
1440
1441 GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:%pa\n", &cntxt_rp);
1442}
1443
1444/* processing events using tasklet */
1445static void gpi_ev_tasklet(unsigned long data)
1446{
1447 struct gpii *gpii = (struct gpii *)data;
1448
1449 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1450
1451 read_lock_bh(&gpii->pm_lock);
1452 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1453 read_unlock_bh(&gpii->pm_lock);
1454 GPII_ERR(gpii, GPI_DBG_COMMON,
1455 "not processing any events, pm_state:%s\n",
1456 TO_GPI_PM_STR(gpii->pm_state));
1457 return;
1458 }
1459
1460 /* process the events */
1461 gpi_process_events(gpii);
1462
1463 /* enable IEOB, switching back to interrupts */
1464 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1465 read_unlock_bh(&gpii->pm_lock);
1466
1467 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1468}
1469
1470/* marks all pending events for the channel as stale */
1471void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
1472{
1473 struct gpii *gpii = gpii_chan->gpii;
1474 struct gpi_ring *ev_ring = &gpii->ev_ring;
1475 void *ev_rp;
1476 u32 cntxt_rp, local_rp;
1477
1478 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1479 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1480
1481 ev_rp = ev_ring->rp;
1482 local_rp = (u32)to_physical(ev_ring, ev_rp);
1483 while (local_rp != cntxt_rp) {
1484 union gpi_event *gpi_event = ev_rp;
1485 u32 chid = gpi_event->xfer_compl_event.chid;
1486
1487 if (chid == gpii_chan->chid)
1488 gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1489 ev_rp += ev_ring->el_size;
1490 if (ev_rp >= (ev_ring->base + ev_ring->len))
1491 ev_rp = ev_ring->base;
1492 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1493 local_rp = (u32)to_physical(ev_ring, ev_rp);
1494 }
1495}
1496
1497/* reset sw state and issue channel reset or de-alloc */
1498static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
1499{
1500 struct gpii *gpii = gpii_chan->gpii;
1501 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1502 unsigned long flags;
1503 LIST_HEAD(list);
1504 int ret;
1505
1506 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1507 ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
1508 if (ret) {
1509 GPII_ERR(gpii, gpii_chan->chid,
1510 "Error with cmd:%s ret:%d\n",
1511 TO_GPI_CMD_STR(gpi_cmd), ret);
1512 return ret;
1513 }
1514
1515 /* initialize the local ring ptrs */
1516 ch_ring->rp = ch_ring->base;
1517 ch_ring->wp = ch_ring->base;
1518
1519 /* visible to other cores */
1520 smp_wmb();
1521
1522 /* check event ring for any stale events */
1523 write_lock_irq(&gpii->pm_lock);
1524 gpi_mark_stale_events(gpii_chan);
1525
1526 /* remove all async descriptors */
1527 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1528 vchan_get_all_descriptors(&gpii_chan->vc, &list);
1529 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1530 write_unlock_irq(&gpii->pm_lock);
1531 vchan_dma_desc_free_list(&gpii_chan->vc, &list);
1532
1533 return 0;
1534}
1535
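/* send the START command and mark the channel software state active */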
1536static int gpi_start_chan(struct gpii_chan *gpii_chan)
1537{
1538 struct gpii *gpii = gpii_chan->gpii;
1539 int ret;
1540
1541 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1542
1543 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
1544 if (ret) {
1545 GPII_ERR(gpii, gpii_chan->chid,
1546 "Error with cmd:%s ret:%d\n",
1547 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1548 return ret;
1549 }
1550
1551 /* gpii CH is active now */
1552 write_lock_irq(&gpii->pm_lock);
1553 gpii_chan->pm_state = ACTIVE_STATE;
1554 write_unlock_irq(&gpii->pm_lock);
1555
1556 return 0;
1557}
1558
1559/* allocate and configure the transfer channel */
1560static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
1561{
1562 struct gpii *gpii = gpii_chan->gpii;
1563 struct gpi_ring *ring = &gpii_chan->ch_ring;
1564 int i;
1565 int ret;
1566 struct {
1567 void *base;
1568 int offset;
1569 u32 val;
1570 } ch_reg[] = {
1571 {
1572 gpii_chan->ch_cntxt_base_reg,
1573 CNTXT_0_CONFIG,
1574 GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
1575 gpii_chan->dir,
1576 GPI_CHTYPE_PROTO_GPI),
1577 },
1578 {
1579 gpii_chan->ch_cntxt_base_reg,
1580 CNTXT_1_R_LENGTH,
1581 ring->len,
1582 },
1583 {
1584 gpii_chan->ch_cntxt_base_reg,
1585 CNTXT_2_RING_BASE_LSB,
1586 (u32)ring->phys_addr,
1587 },
1588 {
1589 gpii_chan->ch_cntxt_base_reg,
1590 CNTXT_3_RING_BASE_MSB,
1591 (u32)(ring->phys_addr >> 32),
1592 },
1593 { /* program MSB of DB register with ring base */
1594 gpii_chan->ch_cntxt_db_reg,
1595 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1596 (u32)(ring->phys_addr >> 32),
1597 },
1598 {
1599 gpii->regs,
1600 GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
1601 gpii_chan->chid),
1602 GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
1603 gpii_chan->protocol,
1604 gpii_chan->seid),
1605 },
1606 {
1607 gpii->regs,
1608 GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
1609 gpii_chan->chid),
1610 0,
1611 },
1612 {
1613 gpii->regs,
1614 GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
1615 gpii_chan->chid),
1616 0,
1617 },
1618 {
1619 gpii->regs,
1620 GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
1621 gpii_chan->chid),
1622 0,
1623 },
1624 {
1625 gpii->regs,
1626 GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
1627 gpii_chan->chid),
1628 1,
1629 },
1630 { NULL },
1631 };
1632
1633 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1634
1635 if (send_alloc_cmd) {
1636 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
1637 if (ret) {
1638 GPII_ERR(gpii, gpii_chan->chid,
1639 "Error with cmd:%s ret:%d\n",
1640 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1641 return ret;
1642 }
1643 }
1644
1645 /* program channel cntxt registers */
1646 for (i = 0; ch_reg[i].base; i++)
1647 gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
1648 ch_reg[i].val);
1649 /* flush all the writes */
1650 wmb();
1651 return 0;
1652}
1653
1654/* allocate and configure event ring */
1655static int gpi_alloc_ev_chan(struct gpii *gpii)
1656{
1657 struct gpi_ring *ring = &gpii->ev_ring;
1658 int i;
1659 int ret;
1660 struct {
1661 void *base;
1662 int offset;
1663 u32 val;
1664 } ev_reg[] = {
1665 {
1666 gpii->ev_cntxt_base_reg,
1667 CNTXT_0_CONFIG,
1668 GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
1669 GPI_INTTYPE_IRQ,
1670 GPI_CHTYPE_GPI_EV),
1671 },
1672 {
1673 gpii->ev_cntxt_base_reg,
1674 CNTXT_1_R_LENGTH,
1675 ring->len,
1676 },
1677 {
1678 gpii->ev_cntxt_base_reg,
1679 CNTXT_2_RING_BASE_LSB,
1680 (u32)ring->phys_addr,
1681 },
1682 {
1683 gpii->ev_cntxt_base_reg,
1684 CNTXT_3_RING_BASE_MSB,
1685 (u32)(ring->phys_addr >> 32),
1686 },
1687 {
1688 /* program db msg with ring base msb */
1689 gpii->ev_cntxt_db_reg,
1690 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1691 (u32)(ring->phys_addr >> 32),
1692 },
1693 {
1694 gpii->ev_cntxt_base_reg,
1695 CNTXT_8_RING_INT_MOD,
1696 0,
1697 },
1698 {
1699 gpii->ev_cntxt_base_reg,
1700 CNTXT_10_RING_MSI_LSB,
1701 0,
1702 },
1703 {
1704 gpii->ev_cntxt_base_reg,
1705 CNTXT_11_RING_MSI_MSB,
1706 0,
1707 },
1708 {
1709 gpii->ev_cntxt_base_reg,
1710 CNTXT_8_RING_INT_MOD,
1711 0,
1712 },
1713 {
1714 gpii->ev_cntxt_base_reg,
1715 CNTXT_12_RING_RP_UPDATE_LSB,
1716 0,
1717 },
1718 {
1719 gpii->ev_cntxt_base_reg,
1720 CNTXT_13_RING_RP_UPDATE_MSB,
1721 0,
1722 },
1723 { NULL },
1724 };
1725
1726 GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
1727
1728 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1729 if (ret) {
1730 GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
1731 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1732 return ret;
1733 }
1734
1735 /* program event context */
1736 for (i = 0; ev_reg[i].base; i++)
1737 gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
1738 ev_reg[i].val);
1739
1740 /* add events to ring */
1741 ring->wp = (ring->base + ring->len - ring->el_size);
1742
1743 /* flush all the writes */
1744 wmb();
1745
1746 /* gpii is active now */
1747 write_lock_irq(&gpii->pm_lock);
1748 gpii->pm_state = ACTIVE_STATE;
1749 write_unlock_irq(&gpii->pm_lock);
1750 gpi_write_ev_db(gpii, ring, ring->wp);
1751
1752 return 0;
1753}
1754
1755/* calculate # of ERE/TRE available to queue */
1756static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1757{
1758 int elements = 0;
1759
1760 if (ring->wp < ring->rp)
1761 elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1762 else {
1763 elements = (ring->rp - ring->base) / ring->el_size;
1764 elements += ((ring->base + ring->len - ring->wp) /
1765 ring->el_size) - 1;
1766 }
1767
1768 return elements;
1769}
1770
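/* claim the next free ring element and advance the write pointer */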
1771static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1772{
1773
1774 if (gpi_ring_num_elements_avail(ring) <= 0)
1775 return -ENOMEM;
1776
1777 *wp = ring->wp;
1778 ring->wp += ring->el_size;
1779 if (ring->wp >= (ring->base + ring->len))
1780 ring->wp = ring->base;
1781
1782 /* visible to other cores */
1783 smp_wmb();
1784
1785 return 0;
1786}
1787
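/* recycle a consumed event element by advancing both rp and wp with wrap-around */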
1788static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1789{
1790 /* Update the WP */
1791 ring->wp += ring->el_size;
1792 if (ring->wp >= (ring->base + ring->len))
1793 ring->wp = ring->base;
1794
1795 /* Update the RP */
1796 ring->rp += ring->el_size;
1797 if (ring->rp >= (ring->base + ring->len))
1798 ring->rp = ring->base;
1799
1800 /* visible to other cores */
1801 smp_wmb();
1802}
1803
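/* free the coherent memory backing a ring and reset its bookkeeping */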
1804static void gpi_free_ring(struct gpi_ring *ring,
1805 struct gpii *gpii)
1806{
1807 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1808    ring->pre_aligned, ring->dma_handle);
1809 memset(ring, 0, sizeof(*ring));
1810}
1811
1812/* allocate memory for transfer and event rings */
1813static int gpi_alloc_ring(struct gpi_ring *ring,
1814 u32 elements,
1815 u32 el_size,
1816     struct gpii *gpii)
1817{
1818 u64 len = elements * el_size;
1819 int bit;
1820
1821 /* ring len must be power of 2 */
1822 bit = find_last_bit((unsigned long *)&len, 32);
1823 if (((1 << bit) - 1) & len)
1824 bit++;
1825 len = 1 << bit;
1826 ring->alloc_size = (len + (len - 1));
1827 GPII_INFO(gpii, GPI_DBG_COMMON,
1828 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
1829 elements, el_size, (elements * el_size), len,
1830 ring->alloc_size);
1831 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1832 ring->alloc_size,
1833 &ring->dma_handle, GFP_KERNEL);
1834 if (!ring->pre_aligned) {
1835 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1836 "could not alloc size:%lu mem for ring\n",
1837 ring->alloc_size);
1838 return -ENOMEM;
1839	}
1840
1841	/* align the physical mem */
1842	ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1843	ring->base = ring->pre_aligned + (ring->phys_addr - ring->dma_handle);
1844	ring->rp = ring->base;
1845 ring->wp = ring->base;
1846 ring->len = len;
1847 ring->el_size = el_size;
1848 ring->elements = ring->len / ring->el_size;
1849 memset(ring->base, 0, ring->len);
1850 ring->configured = true;
1851
1852 /* update to other cores */
1853 smp_wmb();
1854
1855 GPII_INFO(gpii, GPI_DBG_COMMON,
1856 "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
1857 ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
1858 ring->elements);
1859
1860 return 0;
1861}
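/*
 * Sizing example for gpi_alloc_ring() (hypothetical values): a request for
 * 24 TREs of 16 bytes gives len = 384, which is rounded up to the next
 * power of two, 512. alloc_size = 512 + 511 bytes are allocated so that a
 * naturally aligned 512-byte window is guaranteed to exist inside the
 * buffer; phys_addr is dma_handle rounded up to the next 512-byte boundary
 * and base points at the matching offset within pre_aligned.
 */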
1862
1863/* copy tre into transfer ring */
1864static void gpi_queue_xfer(struct gpii *gpii,
1865 struct gpii_chan *gpii_chan,
1866 struct msm_gpi_tre *gpi_tre,
1867			   void **wp)
1868{
1869 struct msm_gpi_tre *ch_tre;
1870 int ret;
1871
1872 /* get next tre location we can copy */
1873 ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
1874 if (unlikely(ret)) {
1875 GPII_CRITIC(gpii, gpii_chan->chid,
1876 "Error adding ring element to xfer ring\n");
1877 return;
1878 }
1879
1880	/* copy the tre info */
1881	memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
1882	*wp = ch_tre;
1883}
1884
1885/* reset and restart transfer channel */
1886int gpi_terminate_all(struct dma_chan *chan)
1887{
1888 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1889 struct gpii *gpii = gpii_chan->gpii;
1890 int schid, echid, i;
1891 int ret = 0;
1892
1893 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1894 mutex_lock(&gpii->ctrl_lock);
1895
1896 /*
1897	 * treat both channels as a group if the protocol is not UART;
1898	 * STOP, RESET, and START need to be issued in lockstep
1899 */
1900 schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
1901 echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
1902 MAX_CHANNELS_PER_GPII;
1903
1904 /* stop the channel */
1905 for (i = schid; i < echid; i++) {
1906 gpii_chan = &gpii->gpii_chan[i];
1907
1908 /* disable ch state so no more TRE processing */
1909 write_lock_irq(&gpii->pm_lock);
1910 gpii_chan->pm_state = PREPARE_TERMINATE;
1911 write_unlock_irq(&gpii->pm_lock);
1912
1913 /* send command to Stop the channel */
1914 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
1915 if (ret)
1916 GPII_ERR(gpii, gpii_chan->chid,
1917 "Error Stopping Channel:%d resetting anyway\n",
1918 ret);
1919 }
1920
1921 /* reset the channels (clears any pending tre) */
1922 for (i = schid; i < echid; i++) {
1923 gpii_chan = &gpii->gpii_chan[i];
1924
1925 ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
1926 if (ret) {
1927 GPII_ERR(gpii, gpii_chan->chid,
1928 "Error resetting channel ret:%d\n", ret);
1929 goto terminate_exit;
1930 }
1931
1932 /* reprogram channel CNTXT */
1933 ret = gpi_alloc_chan(gpii_chan, false);
1934 if (ret) {
1935 GPII_ERR(gpii, gpii_chan->chid,
1936 "Error alloc_channel ret:%d\n", ret);
1937 goto terminate_exit;
1938 }
1939 }
1940
1941 /* restart the channels */
1942 for (i = schid; i < echid; i++) {
1943 gpii_chan = &gpii->gpii_chan[i];
1944
1945 ret = gpi_start_chan(gpii_chan);
1946 if (ret) {
1947 GPII_ERR(gpii, gpii_chan->chid,
1948 "Error Starting Channel ret:%d\n", ret);
1949 goto terminate_exit;
1950 }
1951 }
1952
1953terminate_exit:
1954 mutex_unlock(&gpii->ctrl_lock);
1955 return ret;
1956}
1957
1958/* pause dma transfer for all channels */
1959static int gpi_pause(struct dma_chan *chan)
1960{
1961 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1962 struct gpii *gpii = gpii_chan->gpii;
1963 int i, ret;
1964
1965 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1966 mutex_lock(&gpii->ctrl_lock);
1967
1968 /*
1969 * pause/resume are per gpii not per channel, so
1970 * client needs to call pause only once
1971 */
1972 if (gpii->pm_state == PAUSE_STATE) {
1973 GPII_INFO(gpii, gpii_chan->chid,
1974 "channel is already paused\n");
1975 mutex_unlock(&gpii->ctrl_lock);
1976 return 0;
1977 }
1978
1979 /* send stop command to stop the channels */
1980 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
1981 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
1982 if (ret) {
1983 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
1984 "Error stopping chan, ret:%d\n", ret);
1985 mutex_unlock(&gpii->ctrl_lock);
1986 return ret;
1987 }
1988 }
1989
1990 disable_irq(gpii->irq);
1991
1992 /* Wait for threads to complete out */
1993 tasklet_kill(&gpii->ev_task);
1994
1995 write_lock_irq(&gpii->pm_lock);
1996 gpii->pm_state = PAUSE_STATE;
1997 write_unlock_irq(&gpii->pm_lock);
1998 mutex_unlock(&gpii->ctrl_lock);
1999
2000 return 0;
2001}
2002
2003/* resume dma transfer */
2004static int gpi_resume(struct dma_chan *chan)
2005{
2006 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2007 struct gpii *gpii = gpii_chan->gpii;
2008 int i;
2009 int ret;
2010
2011 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2012
2013 mutex_lock(&gpii->ctrl_lock);
2014 if (gpii->pm_state == ACTIVE_STATE) {
2015 GPII_INFO(gpii, gpii_chan->chid,
2016 "channel is already active\n");
2017 mutex_unlock(&gpii->ctrl_lock);
2018 return 0;
2019 }
2020
2021 enable_irq(gpii->irq);
2022
2023 /* send start command to start the channels */
2024 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2025 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
2026 if (ret) {
2027 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2028 "Erro starting chan, ret:%d\n", ret);
2029 mutex_unlock(&gpii->ctrl_lock);
2030 return ret;
2031 }
2032 }
2033
2034 write_lock_irq(&gpii->pm_lock);
2035 gpii->pm_state = ACTIVE_STATE;
2036 write_unlock_irq(&gpii->pm_lock);
2037 mutex_unlock(&gpii->ctrl_lock);
2038
2039 return 0;
2040}
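/*
 * Clients reach the two callbacks above through the standard dmaengine
 * helpers dmaengine_pause(chan) and dmaengine_resume(chan). Because the
 * state is tracked per gpii rather than per channel, pausing or resuming
 * either the TX or the RX channel stops or restarts both channels of that
 * gpii.
 */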
2041
2042void gpi_desc_free(struct virt_dma_desc *vd)
2043{
2044 struct gpi_desc *gpi_desc = to_gpi_desc(vd);
2045
2046 kfree(gpi_desc);
2047}
2048
2049/* prepare a descriptor and copy client TREs into the transfer ring */
2050struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
2051 struct scatterlist *sgl,
2052 unsigned int sg_len,
2053 enum dma_transfer_direction direction,
2054 unsigned long flags,
2055 void *context)
2056{
2057 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2058 struct gpii *gpii = gpii_chan->gpii;
2059	u32 nr;
2060	u32 nr_req = 0;
2061	int i, j;
2062	struct scatterlist *sg;
2063	struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
2064	void *tre, *wp = NULL;
2065	const gfp_t gfp = GFP_ATOMIC;
2066 struct gpi_desc *gpi_desc;
2067
2068 GPII_VERB(gpii, gpii_chan->chid, "enter\n");
2069
2070 if (!is_slave_direction(direction)) {
2071 GPII_ERR(gpii, gpii_chan->chid,
2072 "invalid dma direction: %d\n", direction);
2073 return NULL;
2074 }
2075
2076 /* calculate # of elements required & available */
2077 nr = gpi_ring_num_elements_avail(ch_ring);
2078	for_each_sg(sgl, sg, sg_len, i) {
2079 GPII_VERB(gpii, gpii_chan->chid,
2080 "%d of %u len:%u\n", i, sg_len, sg->length);
2081 nr_req += (sg->length / ch_ring->el_size);
2082 }
2083	GPII_VERB(gpii, gpii_chan->chid, "el avail:%u req:%u\n", nr, nr_req);
2084
2085	if (nr < nr_req) {
2086		GPII_ERR(gpii, gpii_chan->chid,
2087			 "not enough space in ring, avail:%u required:%u\n",
2088			 nr, nr_req);
2089		return NULL;
2090 }
2091
2092 gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
2093 if (!gpi_desc) {
2094 GPII_ERR(gpii, gpii_chan->chid,
2095 "out of memory for descriptor\n");
2096 return NULL;
2097 }
2098
2099 /* copy each tre into transfer ring */
2100 for_each_sg(sgl, sg, sg_len, i)
2101 for (j = 0, tre = sg_virt(sg); j < sg->length;
2102 j += ch_ring->el_size, tre += ch_ring->el_size)
2103			gpi_queue_xfer(gpii, gpii_chan, tre, &wp);
2104
2105	/* set up the descriptor */
2106	gpi_desc->db = ch_ring->wp;
2107	gpi_desc->wp = wp;
2108	gpi_desc->gpii_chan = gpii_chan;
2109 GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
2110 to_physical(ch_ring, ch_ring->wp),
2111 to_physical(ch_ring, ch_ring->rp));
2112
2113 return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
2114}
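/*
 * Illustrative client-side sketch (not part of this driver) of how a list
 * of pre-formed struct msm_gpi_tre entries could be handed to
 * gpi_prep_slave_sg() through the generic dmaengine API; my_sgl,
 * my_sg_len, my_cb and my_data are hypothetical placeholders.
 *
 *	struct dma_async_tx_descriptor *desc;
 *
 *	desc = dmaengine_prep_slave_sg(chan, my_sgl, my_sg_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -EIO;
 *	desc->callback = my_cb;
 *	desc->callback_param = my_data;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * dma_async_issue_pending() ends up in gpi_issue_pending() below, which
 * rings the channel doorbell for the last issued descriptor.
 */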
2115
2116/* ring the transfer ring doorbell to begin the transfer */
2117static void gpi_issue_pending(struct dma_chan *chan)
2118{
2119 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2120 struct gpii *gpii = gpii_chan->gpii;
2121 unsigned long flags, pm_lock_flags;
2122 struct virt_dma_desc *vd = NULL;
2123 struct gpi_desc *gpi_desc;
2124
2125 GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
2126
2127 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
2128
2129	/* move all submitted descriptors to the issued list */
2130 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
2131 if (vchan_issue_pending(&gpii_chan->vc))
2132 vd = list_last_entry(&gpii_chan->vc.desc_issued,
2133 struct virt_dma_desc, node);
2134 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
2135
2136	/* nothing to do, list is empty */
2137 if (!vd) {
2138 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2139 GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
2140 return;
2141 }
2142
2143 gpi_desc = to_gpi_desc(vd);
2144 gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
2145 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2146}
2147
2148/* configure or issue async command */
2149static int gpi_config(struct dma_chan *chan,
2150 struct dma_slave_config *config)
2151{
2152 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2153 struct gpii *gpii = gpii_chan->gpii;
2154 struct msm_gpi_ctrl *gpi_ctrl = chan->private;
2155 const int ev_factor = gpii->gpi_dev->ev_factor;
2156 u32 elements;
2157 int i = 0;
2158 int ret = 0;
2159
2160 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2161 if (!gpi_ctrl) {
2162 GPII_ERR(gpii, gpii_chan->chid,
2163 "no config ctrl data provided");
2164 return -EINVAL;
2165 }
2166
2167 mutex_lock(&gpii->ctrl_lock);
2168
2169 switch (gpi_ctrl->cmd) {
2170 case MSM_GPI_INIT:
2171 GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
2172
2173 gpii_chan->client_info.callback = gpi_ctrl->init.callback;
2174 gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
2175 gpii_chan->pm_state = CONFIG_STATE;
2176
2177 /* check if both channels are configured before continue */
2178 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2179 if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
2180 goto exit_gpi_init;
2181
2182 /* configure to highest priority from two channels */
2183 gpii->ev_priority = min(gpii->gpii_chan[0].priority,
2184 gpii->gpii_chan[1].priority);
2185
2186 /* protocol must be same for both channels */
2187 if (gpii->gpii_chan[0].protocol !=
2188 gpii->gpii_chan[1].protocol) {
2189 GPII_ERR(gpii, gpii_chan->chid,
2190 "protocol did not match protocol %u != %u\n",
2191 gpii->gpii_chan[0].protocol,
2192 gpii->gpii_chan[1].protocol);
2193 ret = -EINVAL;
2194 goto exit_gpi_init;
2195 }
2196 gpii->protocol = gpii_chan->protocol;
2197
2198 /* allocate memory for event ring */
2199 elements = max(gpii->gpii_chan[0].req_tres,
2200 gpii->gpii_chan[1].req_tres);
2201 ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
2202				     sizeof(union gpi_event), gpii);
2203		if (ret) {
2204 GPII_ERR(gpii, gpii_chan->chid,
2205 "error allocating mem for ev ring\n");
2206 goto exit_gpi_init;
2207 }
2208
2209 /* configure interrupts */
2210 write_lock_irq(&gpii->pm_lock);
2211 gpii->pm_state = PREPARE_HARDWARE;
2212 write_unlock_irq(&gpii->pm_lock);
2213 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
2214 if (ret) {
2215 GPII_ERR(gpii, gpii_chan->chid,
2216 "error config. interrupts, ret:%d\n", ret);
2217 goto error_config_int;
2218 }
2219
2220 /* allocate event rings */
2221 ret = gpi_alloc_ev_chan(gpii);
2222 if (ret) {
2223 GPII_ERR(gpii, gpii_chan->chid,
2224 "error alloc_ev_chan:%d\n", ret);
2225 goto error_alloc_ev_ring;
2226 }
2227
2228 /* Allocate all channels */
2229 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2230 ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
2231 if (ret) {
2232 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2233 "Error allocating chan:%d\n", ret);
2234 goto error_alloc_chan;
2235 }
2236 }
2237
2238 /* start channels */
2239 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2240 ret = gpi_start_chan(&gpii->gpii_chan[i]);
2241 if (ret) {
2242 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2243 "Error start chan:%d\n", ret);
2244 goto error_start_chan;
2245 }
2246 }
2247
2248 break;
2249 case MSM_GPI_CMD_UART_SW_STALE:
2250 GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
2251 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
2252 break;
2253 case MSM_GPI_CMD_UART_RFR_READY:
2254 GPII_INFO(gpii, gpii_chan->chid,
2255 "sending UART RFR READY cmd\n");
2256 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
2257 break;
2258 case MSM_GPI_CMD_UART_RFR_NOT_READY:
2259 GPII_INFO(gpii, gpii_chan->chid,
2260 "sending UART RFR READY NOT READY cmd\n");
2261 ret = gpi_send_cmd(gpii, gpii_chan,
2262 GPI_CH_CMD_UART_RFR_NOT_READY);
2263 break;
2264 default:
2265 GPII_ERR(gpii, gpii_chan->chid,
2266 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
2267 ret = -EINVAL;
2268 }
2269
2270 mutex_unlock(&gpii->ctrl_lock);
2271 return ret;
2272
2273error_start_chan:
2274 for (i = i - 1; i >= 0; i++) {
2275 gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2276 gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2277 }
2278 i = 2;
2279error_alloc_chan:
2280	for (i = i - 1; i >= 0; i--)
2281		gpi_reset_chan(&gpii->gpii_chan[i], GPI_CH_CMD_DE_ALLOC);
2282error_alloc_ev_ring:
2283 gpi_disable_interrupts(gpii);
2284error_config_int:
2285 gpi_free_ring(&gpii->ev_ring, gpii);
2286exit_gpi_init:
2287 mutex_unlock(&gpii->ctrl_lock);
2288 return ret;
2289}
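/*
 * Illustrative sketch (not part of this driver) of how a client could
 * exercise the MSM_GPI_INIT branch above; ev_cb and ev_data are
 * hypothetical placeholders.
 *
 *	struct msm_gpi_ctrl gpi_ctrl = {
 *		.cmd = MSM_GPI_INIT,
 *		.init.callback = ev_cb,
 *		.init.cb_param = ev_data,
 *	};
 *	struct dma_slave_config cfg = { };
 *	int ret;
 *
 *	chan->private = &gpi_ctrl;
 *	ret = dmaengine_slave_config(chan, &cfg);
 *
 * Both the TX and the RX channel of a gpii must be configured this way;
 * the event ring, interrupts and channels are only allocated once the
 * second channel reaches CONFIG_STATE.
 */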
2290
2291/* release all channel resources */
2292static void gpi_free_chan_resources(struct dma_chan *chan)
2293{
2294 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2295 struct gpii *gpii = gpii_chan->gpii;
2296 enum gpi_pm_state cur_state;
2297 int ret, i;
2298
2299 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2300
2301 mutex_lock(&gpii->ctrl_lock);
2302
2303 cur_state = gpii_chan->pm_state;
2304
2305 /* disable ch state so no more TRE processing for this channel */
2306 write_lock_irq(&gpii->pm_lock);
2307 gpii_chan->pm_state = PREPARE_TERMINATE;
2308 write_unlock_irq(&gpii->pm_lock);
2309
2310	/* attempt a graceful hardware shutdown */
2311 if (cur_state == ACTIVE_STATE) {
2312 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2313 if (ret)
2314 GPII_ERR(gpii, gpii_chan->chid,
2315 "error stopping channel:%d\n", ret);
2316
2317 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2318 if (ret)
2319 GPII_ERR(gpii, gpii_chan->chid,
2320 "error resetting channel:%d\n", ret);
2321
2322 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2323 }
2324
2325 /* free all allocated memory */
2326 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2327	vchan_free_chan_resources(&gpii_chan->vc);
2328
2329 write_lock_irq(&gpii->pm_lock);
2330 gpii_chan->pm_state = DISABLE_STATE;
2331 write_unlock_irq(&gpii->pm_lock);
2332
2333 /* if other rings are still active exit */
2334 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2335 if (gpii->gpii_chan[i].ch_ring.configured)
2336 goto exit_free;
2337
2338 GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
2339
2340 /* deallocate EV Ring */
2341 cur_state = gpii->pm_state;
2342 write_lock_irq(&gpii->pm_lock);
2343 gpii->pm_state = PREPARE_TERMINATE;
2344 write_unlock_irq(&gpii->pm_lock);
2345
2346 /* wait for threads to complete out */
2347 tasklet_kill(&gpii->ev_task);
2348
2349 /* send command to de allocate event ring */
2350 if (cur_state == ACTIVE_STATE)
2351 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2352
2353 gpi_free_ring(&gpii->ev_ring, gpii);
2354
2355 /* disable interrupts */
2356 if (cur_state == ACTIVE_STATE)
2357 gpi_disable_interrupts(gpii);
2358
2359 /* set final state to disable */
2360 write_lock_irq(&gpii->pm_lock);
2361 gpii->pm_state = DISABLE_STATE;
2362 write_unlock_irq(&gpii->pm_lock);
2363
2364exit_free:
2365 mutex_unlock(&gpii->ctrl_lock);
2366}
2367
2368/* allocate channel resources */
2369static int gpi_alloc_chan_resources(struct dma_chan *chan)
2370{
2371 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2372 struct gpii *gpii = gpii_chan->gpii;
2373 int ret;
2374
2375 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2376
2377 mutex_lock(&gpii->ctrl_lock);
2378
2379 /* allocate memory for transfer ring */
2380 ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
2381			     sizeof(struct msm_gpi_tre), gpii);
2382	if (ret) {
2383		GPII_ERR(gpii, gpii_chan->chid,
2384			 "error allocating xfer ring, ret:%d\n", ret);
2385		goto xfer_alloc_err;
2386	}
2387	mutex_unlock(&gpii->ctrl_lock);
2388
2389	return 0;
2390xfer_alloc_err:
2391 mutex_unlock(&gpii->ctrl_lock);
2392
2393 return ret;
2394}
2395
2396static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
2397{
2398 int gpii;
2399 struct gpii_chan *tx_chan, *rx_chan;
2400
2401 /* check if same seid is already configured for another chid */
2402 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2403 if (!((1 << gpii) & gpi_dev->gpii_mask))
2404 continue;
2405
2406 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2407 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2408
2409 if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
2410 return gpii;
2411 if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
2412 return gpii;
2413 }
2414
2415 /* no channels configured with same seid, return next avail gpii */
2416 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2417 if (!((1 << gpii) & gpi_dev->gpii_mask))
2418 continue;
2419
2420 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2421 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2422
2423 /* check if gpii is configured */
2424 if (tx_chan->vc.chan.client_count ||
2425 rx_chan->vc.chan.client_count)
2426 continue;
2427
2428 /* found a free gpii */
2429 return gpii;
2430 }
2431
2432 /* no gpii instance available to use */
2433 return -EIO;
2434}
2435
2436/* gpi_of_dma_xlate: open client requested channel */
2437static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2438					 struct of_dma *of_dma)
2439{
2440	struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
2441	u32 seid, chid;
2442	int gpii;
2443	struct gpii_chan *gpii_chan;
2444
2445 if (args->args_count < REQ_OF_DMA_ARGS) {
2446 GPI_ERR(gpi_dev,
2447 "gpii require minimum 6 args, client passed:%d args\n",
2448 args->args_count);
2449 return NULL;
2450 }
2451
2452	chid = args->args[0];
2453	if (chid >= MAX_CHANNELS_PER_GPII) {
2454		GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
2455		return NULL;
2456	}
2457
2458	seid = args->args[1];
2459
2460 /* find next available gpii to use */
2461 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2462 if (gpii < 0) {
2463 GPI_ERR(gpi_dev, "no available gpii instances\n");
2464 return NULL;
2465 }
2466
2467	gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
2468	if (gpii_chan->vc.chan.client_count) {
2469 GPI_ERR(gpi_dev, "gpii:%d chid:%d seid:%d already configured\n",
2470 gpii, chid, gpii_chan->seid);
2471 return NULL;
2472 }
2473
2474 /* get ring size, protocol, se_id, and priority */
2475 gpii_chan->seid = seid;
2476 gpii_chan->protocol = args->args[2];
2477 gpii_chan->req_tres = args->args[3];
2478 gpii_chan->priority = args->args[4];
2479
2480 GPI_LOG(gpi_dev,
2481 "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
2482 gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
2483 gpii_chan->protocol);
2484
2485 return dma_get_slave_channel(&gpii_chan->vc.chan);
2486}
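/*
 * Hypothetical client devicetree entry matching the argument order decoded
 * above (the phandle label and all values are illustrative only):
 *
 *	dmas = <&gpi_dma0 0 1 2 64 0 0>,
 *	       <&gpi_dma0 1 1 2 64 0 0>;
 *	dma-names = "tx", "rx";
 *
 * args[0] = channel id, args[1] = serial engine id, args[2] = protocol,
 * args[3] = number of TREs to reserve, args[4] = priority; a sixth cell is
 * required by REQ_OF_DMA_ARGS but is not interpreted here.
 */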
2487
2488/* gpi_setup_debug - setup debug capabilities */
2489static void gpi_setup_debug(struct gpi_dev *gpi_dev)
2490{
2491 char node_name[GPI_LABEL_SIZE];
2492 const umode_t mode = 0600;
2493 int i;
2494
2495 snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
2496 (u64)gpi_dev->res->start);
2497
2498 gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2499 node_name, 0);
2500 gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2501 if (!IS_ERR_OR_NULL(pdentry)) {
2502 snprintf(node_name, sizeof(node_name), "%llx",
2503 (u64)gpi_dev->res->start);
2504 gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
2505 if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
2506 debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
2507 &gpi_dev->ipc_log_lvl);
2508 debugfs_create_u32("klog_lvl", mode,
2509 gpi_dev->dentry, &gpi_dev->klog_lvl);
2510 }
2511 }
2512
2513 for (i = 0; i < gpi_dev->max_gpii; i++) {
2514 struct gpii *gpii;
2515
2516 if (!((1 << i) & gpi_dev->gpii_mask))
2517 continue;
2518
2519 gpii = &gpi_dev->gpiis[i];
2520 snprintf(gpii->label, sizeof(gpii->label),
2521 "%s%llx_gpii%d",
2522 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
2523 gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2524 gpii->label, 0);
2525 gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2526 gpii->klog_lvl = DEFAULT_KLOG_LVL;
2527
2528 if (IS_ERR_OR_NULL(gpi_dev->dentry))
2529 continue;
2530
2531 snprintf(node_name, sizeof(node_name), "gpii%d", i);
2532 gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
2533 if (IS_ERR_OR_NULL(gpii->dentry))
2534 continue;
2535
2536 debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
2537 &gpii->ipc_log_lvl);
2538 debugfs_create_u32("klog_lvl", mode, gpii->dentry,
2539 &gpii->klog_lvl);
2540 }
2541}
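/*
 * The debugfs knobs created above can be tuned at runtime, e.g. assuming
 * debugfs is mounted at /sys/kernel/debug and a hypothetical register base
 * of 0x800000:
 *
 *	echo 3 > /sys/kernel/debug/gpi_dma/800000/gpii0/klog_lvl
 *
 * raises the gpii0 kernel log verbosity to LOG_LVL_INFO.
 */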
2542
2543static struct dma_iommu_mapping *gpi_create_mapping(struct gpi_dev *gpi_dev)
2544{
2545 dma_addr_t base;
2546 size_t size;
2547
2548 /*
2549	 * If S1_BYPASS is enabled, the iommu address space is not used;
2550	 * however, the framework still requires clients to create a mapping
2551	 * before attaching, so use the smallest size the iommu framework accepts.
2552 */
2553 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2554 base = 0;
2555 size = PAGE_SIZE;
2556 } else {
2557 base = gpi_dev->iova_base;
2558 size = gpi_dev->iova_size;
2559 }
2560
2561 GPI_LOG(gpi_dev, "Creating iommu mapping of base:0x%llx size:%lu\n",
2562 base, size);
2563
2564 return arm_iommu_create_mapping(&platform_bus_type, base, size);
2565}
2566
2567static int gpi_dma_mask(struct gpi_dev *gpi_dev)
2568{
2569 int mask = 64;
2570
2571 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2572 unsigned long addr;
2573
2574 addr = gpi_dev->iova_base + gpi_dev->iova_size + 1;
2575 mask = find_last_bit(&addr, 64);
2576 }
2577
2578 GPI_LOG(gpi_dev, "Setting dma mask to %d\n", mask);
2579
2580 return dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(mask));
2581}
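/*
 * Example of the mask calculation above (hypothetical values): with
 * iova_base = 0x20000000 and iova_size = 0x1fffffff, the last usable
 * address + 1 is 0x40000000, find_last_bit() returns 30, and the device is
 * limited to DMA_BIT_MASK(30). With no SMMU configuration, or with S1
 * bypass, the full 64-bit mask is kept.
 */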
2582
2583static int gpi_smmu_init(struct gpi_dev *gpi_dev)
2584{
2585	struct dma_iommu_mapping *mapping = NULL;
2586	int ret;
2587
2588	if (gpi_dev->smmu_cfg) {
2589
2590 /* create mapping table */
2591 mapping = gpi_create_mapping(gpi_dev);
2592 if (IS_ERR(mapping)) {
2593 GPI_ERR(gpi_dev,
2594 "Failed to create iommu mapping, ret:%ld\n",
2595 PTR_ERR(mapping));
2596 return PTR_ERR(mapping);
2597 }
2598
2599 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2600 int s1_bypass = 1;
2601
2602 ret = iommu_domain_set_attr(mapping->domain,
2603 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
2604 if (ret) {
2605 GPI_ERR(gpi_dev,
2606 "Failed to set attr S1_BYPASS, ret:%d\n",
2607 ret);
2608 goto release_mapping;
2609 }
2610 }
2611
2612 if (gpi_dev->smmu_cfg & GPI_SMMU_FAST) {
2613 int fast = 1;
2614
2615 ret = iommu_domain_set_attr(mapping->domain,
2616 DOMAIN_ATTR_FAST, &fast);
2617 if (ret) {
2618 GPI_ERR(gpi_dev,
2619 "Failed to set attr FAST, ret:%d\n",
2620 ret);
2621 goto release_mapping;
2622 }
2623 }
2624
2625 if (gpi_dev->smmu_cfg & GPI_SMMU_ATOMIC) {
2626 int atomic = 1;
2627
2628 ret = iommu_domain_set_attr(mapping->domain,
2629 DOMAIN_ATTR_ATOMIC, &atomic);
2630 if (ret) {
2631 GPI_ERR(gpi_dev,
2632 "Failed to set attr ATOMIC, ret:%d\n",
2633 ret);
2634 goto release_mapping;
2635 }
2636 }
2637
2638 ret = arm_iommu_attach_device(gpi_dev->dev, mapping);
2639 if (ret) {
2640 GPI_ERR(gpi_dev,
2641 "Failed with iommu_attach, ret:%d\n", ret);
2642 goto release_mapping;
2643 }
2644	}
2645
2646	ret = gpi_dma_mask(gpi_dev);
2647	if (ret) {
2648		GPI_ERR(gpi_dev, "Error setting dma_mask, ret:%d\n", ret);
2649		goto error_set_mask;
2650 }
2651
2652 return ret;
2653
2654error_set_mask:
2655	if (gpi_dev->smmu_cfg)
2656		arm_iommu_detach_device(gpi_dev->dev);
2657release_mapping:
2658	if (mapping)
2659		arm_iommu_release_mapping(mapping);
2660	return ret;
2661}
2662
2663static int gpi_probe(struct platform_device *pdev)
2664{
2665 struct gpi_dev *gpi_dev;
2666 int ret, i;
2667
2668 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2669 if (!gpi_dev)
2670 return -ENOMEM;
2671
2672 gpi_dev->dev = &pdev->dev;
2673 gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
2674 gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2675 "gpi-top");
2676 if (!gpi_dev->res) {
2677 GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
2678 return -EINVAL;
2679 }
2680 gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
2681 resource_size(gpi_dev->res));
2682 if (!gpi_dev->regs) {
2683 GPI_ERR(gpi_dev, "IO remap failed\n");
2684 return -EFAULT;
2685 }
2686
2687 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
2688 &gpi_dev->max_gpii);
2689 if (ret) {
2690 GPI_ERR(gpi_dev, "missing 'max-no-gpii' DT node\n");
2691 return ret;
2692 }
2693
2694 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
2695 &gpi_dev->gpii_mask);
2696 if (ret) {
2697 GPI_ERR(gpi_dev, "missing 'gpii-mask' DT node\n");
2698 return ret;
2699 }
2700
2701 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
2702 &gpi_dev->ev_factor);
2703 if (ret) {
2704 GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
2705 return ret;
2706 }
2707
2708	ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,smmu-cfg",
2709 &gpi_dev->smmu_cfg);
2710 if (ret) {
2711 GPI_ERR(gpi_dev, "missing 'qcom,smmu-cfg' DT node\n");
2712 return ret;
2713 }
2714 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2715 u64 iova_range[2];
2716
2717 ret = of_property_count_elems_of_size(gpi_dev->dev->of_node,
2718 "qcom,iova-range",
2719 sizeof(iova_range));
2720 if (ret != 1) {
2721 GPI_ERR(gpi_dev,
2722 "missing or incorrect 'qcom,iova-range' DT node ret:%d\n",
2723 ret);
2724 }
2725
2726 ret = of_property_read_u64_array(gpi_dev->dev->of_node,
2727 "qcom,iova-range", iova_range,
2728 sizeof(iova_range) / sizeof(u64));
2729 if (ret) {
2730 GPI_ERR(gpi_dev,
2731 "could not read DT prop 'qcom,iova-range\n");
2732 return ret;
2733 }
2734 gpi_dev->iova_base = iova_range[0];
2735 gpi_dev->iova_size = iova_range[1];
2736 }
2737
2738	ret = gpi_smmu_init(gpi_dev);
2739 if (ret) {
2740 GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
2741 return ret;
2742 }
2743
2744 gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
2745 sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
2746 GFP_KERNEL);
2747 if (!gpi_dev->gpiis)
2748 return -ENOMEM;
2749
2750
2751 /* setup all the supported gpii */
2752 INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2753 for (i = 0; i < gpi_dev->max_gpii; i++) {
2754 struct gpii *gpii = &gpi_dev->gpiis[i];
2755 int chan;
2756
2757 if (!((1 << i) & gpi_dev->gpii_mask))
2758 continue;
2759
2760 /* set up ev cntxt register map */
2761 gpii->ev_cntxt_base_reg = gpi_dev->regs +
2762 GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2763 gpii->ev_cntxt_db_reg = gpi_dev->regs +
2764 GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2765 gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
2766 CNTXT_2_RING_BASE_LSB;
2767 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
2768 CNTXT_4_RING_RP_LSB;
2769 gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
2770 CNTXT_6_RING_WP_LSB;
2771 gpii->ev_cmd_reg = gpi_dev->regs +
2772 GPI_GPII_n_EV_CH_CMD_OFFS(i);
2773 gpii->ieob_src_reg = gpi_dev->regs +
2774 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
2775 gpii->ieob_clr_reg = gpi_dev->regs +
2776 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2777
2778 /* set up irq */
2779 ret = platform_get_irq(pdev, i);
2780 if (ret < 0) {
2781 GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d",
2782 i, ret);
2783 return ret;
2784 }
2785 gpii->irq = ret;
2786
2787 /* set up channel specific register info */
2788 for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2789 struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
2790
2791 /* set up ch cntxt register map */
2792 gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
2793 GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2794 gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
2795 GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2796 gpii_chan->ch_ring_base_lsb_reg =
2797 gpii_chan->ch_cntxt_base_reg +
2798 CNTXT_2_RING_BASE_LSB;
2799 gpii_chan->ch_ring_rp_lsb_reg =
2800 gpii_chan->ch_cntxt_base_reg +
2801 CNTXT_4_RING_RP_LSB;
2802 gpii_chan->ch_ring_wp_lsb_reg =
2803 gpii_chan->ch_cntxt_base_reg +
2804 CNTXT_6_RING_WP_LSB;
2805 gpii_chan->ch_cmd_reg = gpi_dev->regs +
2806 GPI_GPII_n_CH_CMD_OFFS(i);
2807
2808 /* vchan setup */
2809 vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
2810 gpii_chan->vc.desc_free = gpi_desc_free;
2811 gpii_chan->chid = chan;
2812 gpii_chan->gpii = gpii;
2813 gpii_chan->dir = GPII_CHAN_DIR[chan];
2814 }
2815 mutex_init(&gpii->ctrl_lock);
2816 rwlock_init(&gpii->pm_lock);
2817 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2818 (unsigned long)gpii);
2819 init_completion(&gpii->cmd_completion);
2820 gpii->gpii_id = i;
2821 gpii->regs = gpi_dev->regs;
2822 gpii->gpi_dev = gpi_dev;
2823 atomic_set(&gpii->dbg_index, 0);
2824 }
2825
2826 platform_set_drvdata(pdev, gpi_dev);
2827
2828 /* clear and Set capabilities */
2829 dma_cap_zero(gpi_dev->dma_device.cap_mask);
2830 dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2831
2832 /* configure dmaengine apis */
2833 gpi_dev->dma_device.directions =
2834 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2835 gpi_dev->dma_device.residue_granularity =
2836 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2837 gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2838 gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2839 gpi_dev->dma_device.device_alloc_chan_resources =
2840 gpi_alloc_chan_resources;
2841 gpi_dev->dma_device.device_free_chan_resources =
2842 gpi_free_chan_resources;
2843 gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2844 gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2845 gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2846 gpi_dev->dma_device.device_config = gpi_config;
2847 gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2848 gpi_dev->dma_device.dev = gpi_dev->dev;
2849 gpi_dev->dma_device.device_pause = gpi_pause;
2850 gpi_dev->dma_device.device_resume = gpi_resume;
2851
2852 /* register with dmaengine framework */
2853 ret = dma_async_device_register(&gpi_dev->dma_device);
2854 if (ret) {
2855 GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
2856 return ret;
2857 }
2858
2859 ret = of_dma_controller_register(gpi_dev->dev->of_node,
2860 gpi_of_dma_xlate, gpi_dev);
2861 if (ret) {
2862 GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
2863 return ret;
2864 }
2865
2866 /* setup debug capabilities */
2867 gpi_setup_debug(gpi_dev);
2868 GPI_LOG(gpi_dev, "probe success\n");
2869
2870 return ret;
2871}
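/*
 * Hypothetical devicetree node exercising the properties read by
 * gpi_probe() above (addresses, interrupt numbers, masks and counts are
 * illustrative only):
 *
 *	gpi_dma0: qcom,gpi-dma@800000 {
 *		compatible = "qcom,gpi-dma";
 *		reg = <0x800000 0x60000>;
 *		reg-names = "gpi-top";
 *		interrupts = <GIC_SPI 244 IRQ_TYPE_LEVEL_HIGH>;
 *		qcom,max-num-gpii = <1>;
 *		qcom,gpii-mask = <0x1>;
 *		qcom,ev-factor = <2>;
 *		qcom,smmu-cfg = <0x1>;
 *		qcom,iova-range = <0x0 0x100000 0x0 0x100000>;
 *		#dma-cells = <6>;
 *	};
 *
 * One interrupt entry per gpii index is expected, fetched with
 * platform_get_irq(pdev, i).
 */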
2872
2873static const struct of_device_id gpi_of_match[] = {
2874 { .compatible = "qcom,gpi-dma" },
2875 {}
2876};
2877MODULE_DEVICE_TABLE(of, gpi_of_match);
2878
2879static struct platform_driver gpi_driver = {
2880 .probe = gpi_probe,
2881 .driver = {
2882 .name = GPI_DMA_DRV_NAME,
2883 .of_match_table = gpi_of_match,
2884 },
2885};
2886
2887static int __init gpi_init(void)
2888{
2889 pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
2890 return platform_driver_register(&gpi_driver);
2891}
2892module_init(gpi_init)
2893
2894MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2895MODULE_LICENSE("GPL v2");