/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <asm/dma-iommu.h>
14#include <linux/atomic.h>
15#include <linux/completion.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/io.h>
21#include <linux/iommu.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/ipc_logging.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_address.h>
29#include <linux/of_dma.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/scatterlist.h>
33#include <linux/sched_clock.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <asm/cacheflush.h>
37#include <linux/msm_gpi.h>
38#include "../dmaengine.h"
39#include "../virt-dma.h"
40#include "msm_gpi_mmio.h"
41
42/* global logging macros */
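/*
 * Each macro logs to the kernel log (gated by klog_lvl) and, when an IPC log
 * context exists, to the IPC logging buffer (gated by ipc_log_lvl), e.g.
 * GPI_LOG(gpi_dev, "gpii_mask:0x%x\n", gpi_dev->gpii_mask);
 */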
43#define GPI_LOG(gpi_dev, fmt, ...) do { \
44 if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
45 dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
46 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
47 ipc_log_string(gpi_dev->ilctxt, \
48 "%s: " fmt, __func__, ##__VA_ARGS__); \
49 } while (0)
50#define GPI_ERR(gpi_dev, fmt, ...) do { \
51 if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
52 dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
53 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
54 ipc_log_string(gpi_dev->ilctxt, \
55 "%s: " fmt, __func__, ##__VA_ARGS__); \
56 } while (0)
57
58/* gpii specific logging macros */
59#define GPII_REG(gpii, ch, fmt, ...) do { \
60 if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
61 pr_info("%s:%u:%s: " fmt, gpii->label, \
62 ch, __func__, ##__VA_ARGS__); \
63 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
64 ipc_log_string(gpii->ilctxt, \
65 "ch:%u %s: " fmt, ch, \
66 __func__, ##__VA_ARGS__); \
67 } while (0)
68#define GPII_VERB(gpii, ch, fmt, ...) do { \
69 if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
70 pr_info("%s:%u:%s: " fmt, gpii->label, \
71 ch, __func__, ##__VA_ARGS__); \
72 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
73 ipc_log_string(gpii->ilctxt, \
74 "ch:%u %s: " fmt, ch, \
75 __func__, ##__VA_ARGS__); \
76 } while (0)
77#define GPII_INFO(gpii, ch, fmt, ...) do { \
78 if (gpii->klog_lvl >= LOG_LVL_INFO) \
79 pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
80 __func__, ##__VA_ARGS__); \
81 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
82 ipc_log_string(gpii->ilctxt, \
83 "ch:%u %s: " fmt, ch, \
84 __func__, ##__VA_ARGS__); \
85 } while (0)
86#define GPII_ERR(gpii, ch, fmt, ...) do { \
87 if (gpii->klog_lvl >= LOG_LVL_ERROR) \
88 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
89 __func__, ##__VA_ARGS__); \
90 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
91 ipc_log_string(gpii->ilctxt, \
92 "ch:%u %s: " fmt, ch, \
93 __func__, ##__VA_ARGS__); \
94 } while (0)
95#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
96 if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
97 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
98 __func__, ##__VA_ARGS__); \
99 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
100 ipc_log_string(gpii->ilctxt, \
101 "ch:%u %s: " fmt, ch, \
102 __func__, ##__VA_ARGS__); \
103 } while (0)
104
105enum DEBUG_LOG_LVL {
106 LOG_LVL_MASK_ALL,
107 LOG_LVL_CRITICAL,
108 LOG_LVL_ERROR,
109 LOG_LVL_INFO,
110 LOG_LVL_VERBOSE,
111 LOG_LVL_REG_ACCESS,
112};
113
114enum EV_PRIORITY {
115 EV_PRIORITY_ISR,
116 EV_PRIORITY_TASKLET,
117};
118
119#define GPI_DMA_DRV_NAME "gpi_dma"
120#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
121#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
122#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
123#define IPC_LOG_PAGES (40)
124#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
#define CMD_TIMEOUT_MS (1000)
#else
#define IPC_LOG_PAGES (2)
#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
#define CMD_TIMEOUT_MS (250)
#endif
132
133#define GPI_LABEL_SIZE (256)
134#define GPI_DBG_COMMON (99)
135#define MAX_CHANNELS_PER_GPII (2)
#define GPI_TX_CHAN (0)
#define GPI_RX_CHAN (1)
#define STATE_IGNORE (U32_MAX)
#define REQ_OF_DMA_ARGS (5) /* # of arguments required from client */

141struct __packed gpi_error_log_entry {
142 u32 routine : 4;
143 u32 type : 4;
144 u32 reserved0 : 4;
145 u32 code : 4;
146 u32 reserved1 : 3;
147 u32 chid : 5;
148 u32 reserved2 : 1;
149 u32 chtype : 1;
150 u32 ee : 1;
151};
152
153struct __packed xfer_compl_event {
154 u64 ptr;
155 u32 length : 24;
156 u8 code;
157 u16 status;
158 u8 type;
159 u8 chid;
160};
161
162struct __packed immediate_data_event {
163 u8 data_bytes[8];
164 u8 length : 4;
165 u8 resvd : 4;
166 u16 tre_index;
167 u8 code;
168 u16 status;
169 u8 type;
170 u8 chid;
171};
172
173struct __packed qup_notif_event {
174 u32 status;
175 u32 time;
176 u32 count :24;
177 u8 resvd;
178 u16 resvd1;
179 u8 type;
180 u8 chid;
181};
182
183struct __packed gpi_ere {
184 u32 dword[4];
185};
186
187enum GPI_EV_TYPE {
188 XFER_COMPLETE_EV_TYPE = 0x22,
189 IMMEDIATE_DATA_EV_TYPE = 0x30,
190 QUP_NOTIF_EV_TYPE = 0x31,
191 STALE_EV_TYPE = 0xFF,
192};
193
194union __packed gpi_event {
195 struct __packed xfer_compl_event xfer_compl_event;
196 struct __packed immediate_data_event immediate_data_event;
197 struct __packed qup_notif_event qup_notif_event;
198 struct __packed gpi_ere gpi_ere;
199};
200
201enum gpii_irq_settings {
202 DEFAULT_IRQ_SETTINGS,
203 MASK_IEOB_SETTINGS,
204};
205
206enum gpi_ev_state {
207 DEFAULT_EV_CH_STATE = 0,
208 EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
209 EV_STATE_ALLOCATED,
210 MAX_EV_STATES
211};
212
213static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
214 [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
215 [EV_STATE_ALLOCATED] = "ALLOCATED",
216};
217
218#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
219 "INVALID" : gpi_ev_state_str[state])
220
221enum gpi_ch_state {
222 DEFAULT_CH_STATE = 0x0,
223 CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
224 CH_STATE_ALLOCATED = 0x1,
225 CH_STATE_STARTED = 0x2,
226 CH_STATE_STOPPED = 0x3,
227 CH_STATE_STOP_IN_PROC = 0x4,
228 CH_STATE_ERROR = 0xf,
229 MAX_CH_STATES
230};
231
232static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
233 [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
234 [CH_STATE_ALLOCATED] = "ALLOCATED",
235 [CH_STATE_STARTED] = "STARTED",
236 [CH_STATE_STOPPED] = "STOPPED",
237 [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
238 [CH_STATE_ERROR] = "ERROR",
239};
240
241#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
242 "INVALID" : gpi_ch_state_str[state])
243
244enum gpi_cmd {
245 GPI_CH_CMD_BEGIN,
246 GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
247 GPI_CH_CMD_START,
248 GPI_CH_CMD_STOP,
249 GPI_CH_CMD_RESET,
250 GPI_CH_CMD_DE_ALLOC,
251 GPI_CH_CMD_UART_SW_STALE,
252 GPI_CH_CMD_UART_RFR_READY,
253 GPI_CH_CMD_UART_RFR_NOT_READY,
254 GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
255 GPI_EV_CMD_BEGIN,
256 GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
257 GPI_EV_CMD_RESET,
258 GPI_EV_CMD_DEALLOC,
259 GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
260 GPI_MAX_CMD,
261};
262
263#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
264
265static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
266 [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
267 [GPI_CH_CMD_START] = "CH START",
268 [GPI_CH_CMD_STOP] = "CH STOP",
269 [GPI_CH_CMD_RESET] = "CH_RESET",
270 [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
271 [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
272 [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
273 [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
274 [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
275 [GPI_EV_CMD_RESET] = "EV RESET",
276 [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
277};
278
279#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
280 gpi_cmd_str[cmd])
281
282static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
283 [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
284 [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
285 [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
286 [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
287 [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
288 [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
289};
290
291#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
292 "INVALID" : gpi_cb_event_str[event])
293
294enum se_protocol {
295 SE_PROTOCOL_SPI = 1,
296 SE_PROTOCOL_UART = 2,
297 SE_PROTOCOL_I2C = 3,
298 SE_MAX_PROTOCOL
299};
300
/*
 * @DISABLE_STATE: no register access allowed
 * @CONFIG_STATE: client has configured the channel
 * @PREPARE_HARDWARE: register access is allowed,
 *		      but events are not processed yet
 * @ACTIVE_STATE: channels are fully operational
 * @PREPARE_TERMINATE: graceful termination of channels,
 *		       register access is still allowed
 * @PAUSE_STATE: channels are active, but not processing any events
 */
311enum gpi_pm_state {
312 DISABLE_STATE,
313 CONFIG_STATE,
314 PREPARE_HARDWARE,
315 ACTIVE_STATE,
316 PREPARE_TERMINATE,
317 PAUSE_STATE,
318 MAX_PM_STATE
319};
320
321#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
322
323static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
324 [DISABLE_STATE] = "DISABLE",
325 [CONFIG_STATE] = "CONFIG",
326 [PREPARE_HARDWARE] = "PREPARE HARDWARE",
327 [ACTIVE_STATE] = "ACTIVE",
328 [PREPARE_TERMINATE] = "PREPARE TERMINATE",
329 [PAUSE_STATE] = "PAUSE",
330};
331
332#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
333 "INVALID" : gpi_pm_state_str[state])
334
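/*
 * gpi_cmd_info maps each command to its register opcode, the channel/event
 * ring state expected once the command completes, and the completion
 * timeout used by gpi_send_cmd().
 */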
335static const struct {
336 enum gpi_cmd gpi_cmd;
337 u32 opcode;
338 u32 state;
339 u32 timeout_ms;
340} gpi_cmd_info[GPI_MAX_CMD] = {
341 {
342 GPI_CH_CMD_ALLOCATE,
343 GPI_GPII_n_CH_CMD_ALLOCATE,
344 CH_STATE_ALLOCATED,
345 CMD_TIMEOUT_MS,
346 },
347 {
348 GPI_CH_CMD_START,
349 GPI_GPII_n_CH_CMD_START,
350 CH_STATE_STARTED,
351 CMD_TIMEOUT_MS,
352 },
353 {
354 GPI_CH_CMD_STOP,
355 GPI_GPII_n_CH_CMD_STOP,
356 CH_STATE_STOPPED,
357 CMD_TIMEOUT_MS,
358 },
359 {
360 GPI_CH_CMD_RESET,
361 GPI_GPII_n_CH_CMD_RESET,
362 CH_STATE_ALLOCATED,
363 CMD_TIMEOUT_MS,
364 },
365 {
366 GPI_CH_CMD_DE_ALLOC,
367 GPI_GPII_n_CH_CMD_DE_ALLOC,
368 CH_STATE_NOT_ALLOCATED,
369 CMD_TIMEOUT_MS,
370 },
371 {
372 GPI_CH_CMD_UART_SW_STALE,
373 GPI_GPII_n_CH_CMD_UART_SW_STALE,
374 STATE_IGNORE,
375 CMD_TIMEOUT_MS,
376 },
377 {
378 GPI_CH_CMD_UART_RFR_READY,
379 GPI_GPII_n_CH_CMD_UART_RFR_READY,
380 STATE_IGNORE,
381 CMD_TIMEOUT_MS,
382 },
383 {
384 GPI_CH_CMD_UART_RFR_NOT_READY,
385 GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
386 STATE_IGNORE,
387 CMD_TIMEOUT_MS,
388 },
389 {
390 GPI_EV_CMD_ALLOCATE,
391 GPI_GPII_n_EV_CH_CMD_ALLOCATE,
392 EV_STATE_ALLOCATED,
393 CMD_TIMEOUT_MS,
394 },
395 {
396 GPI_EV_CMD_RESET,
397 GPI_GPII_n_EV_CH_CMD_RESET,
398 EV_STATE_ALLOCATED,
399 CMD_TIMEOUT_MS,
400 },
401 {
402 GPI_EV_CMD_DEALLOC,
403 GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
404 EV_STATE_NOT_ALLOCATED,
405 CMD_TIMEOUT_MS,
406 },
407};
408
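/*
 * gpi_ring describes one transfer or event ring: base/rp/wp are kernel
 * virtual pointers into the ring, phys_addr/dma_handle are the device view,
 * and len == el_size * elements.
 */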
409struct gpi_ring {
410 void *pre_aligned;
411 size_t alloc_size;
412 phys_addr_t phys_addr;
413 dma_addr_t dma_handle;
414 void *base;
415 void *wp;
416 void *rp;
417 u32 len;
418 u32 el_size;
419 u32 elements;
420 bool configured;
421};
422
423struct sg_tre {
424 void *ptr;
425 void *wp; /* store chan wp for debugging */
426};
427
428struct gpi_dbg_log {
429 void *addr;
430 u64 time;
431 u32 val;
432 bool read;
433};
434
435struct gpi_dev {
436 struct dma_device dma_device;
437 struct device *dev;
438 struct resource *res;
439 void __iomem *regs;
440 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
441 u32 gpii_mask; /* gpii instances available for apps */
442 u32 ev_factor; /* ev ring length factor */
	u32 smmu_cfg;
	dma_addr_t iova_base;
	size_t iova_size;
	struct gpii *gpiis;
447 void *ilctxt;
448 u32 ipc_log_lvl;
449 u32 klog_lvl;
450 struct dentry *dentry;
451};
452
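/* per-channel state; each gpii exposes one TX (chid 0) and one RX (chid 1) channel */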
453struct gpii_chan {
454 struct virt_dma_chan vc;
455 u32 chid;
456 u32 seid;
457 enum se_protocol protocol;
458 enum EV_PRIORITY priority; /* comes from clients DT node */
459 struct gpii *gpii;
460 enum gpi_ch_state ch_state;
461 enum gpi_pm_state pm_state;
462 void __iomem *ch_cntxt_base_reg;
463 void __iomem *ch_cntxt_db_reg;
464 void __iomem *ch_ring_base_lsb_reg,
465 *ch_ring_rp_lsb_reg,
466 *ch_ring_wp_lsb_reg;
467 void __iomem *ch_cmd_reg;
468 u32 req_tres; /* # of tre's client requested */
469 u32 dir;
470 struct gpi_ring ch_ring;
471 struct gpi_ring sg_ring; /* points to client scatterlist */
472 struct gpi_client_info client_info;
473};
474
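/*
 * gpii is one GPI interface instance: two transfer channels sharing a single
 * event ring, IRQ line, and event-processing tasklet.
 */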
475struct gpii {
476 u32 gpii_id;
477 struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
478 struct gpi_dev *gpi_dev;
479 enum EV_PRIORITY ev_priority;
480 enum se_protocol protocol;
481 int irq;
482 void __iomem *regs; /* points to gpi top */
483 void __iomem *ev_cntxt_base_reg;
484 void __iomem *ev_cntxt_db_reg;
485 void __iomem *ev_ring_base_lsb_reg,
486 *ev_ring_rp_lsb_reg,
487 *ev_ring_wp_lsb_reg;
488 void __iomem *ev_cmd_reg;
489 void __iomem *ieob_src_reg;
490 void __iomem *ieob_clr_reg;
491 struct mutex ctrl_lock;
492 enum gpi_ev_state ev_state;
493 bool configured_irq;
494 enum gpi_pm_state pm_state;
495 rwlock_t pm_lock;
496 struct gpi_ring ev_ring;
497 struct tasklet_struct ev_task; /* event processing tasklet */
498 struct completion cmd_completion;
499 enum gpi_cmd gpi_cmd;
500 u32 cntxt_type_irq_msk;
501 void *ilctxt;
502 u32 ipc_log_lvl;
503 u32 klog_lvl;
504 struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
505 atomic_t dbg_index;
506 char label[GPI_LABEL_SIZE];
507 struct dentry *dentry;
508};
509
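/* descriptor handed back to the dmaengine framework for each queued transfer */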
510struct gpi_desc {
511 struct virt_dma_desc vd;
512 void *wp; /* points to TRE last queued during issue_pending */
513 struct sg_tre *sg_tre; /* points to last scatterlist */
514 void *db; /* DB register to program */
515 struct gpii_chan *gpii_chan;
516};
517
#define GPI_SMMU_ATTACH BIT(0)
519#define GPI_SMMU_S1_BYPASS BIT(1)
520#define GPI_SMMU_FAST BIT(2)
521#define GPI_SMMU_ATOMIC BIT(3)
522
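/* channel 0 moves data out of memory (TX), channel 1 into memory (RX) */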
const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
524 GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
525};
526
527struct dentry *pdentry;
528static irqreturn_t gpi_handle_irq(int irq, void *data);
529static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
530static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
531static void gpi_process_events(struct gpii *gpii);
532
533static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
534{
535 return container_of(dma_chan, struct gpii_chan, vc.chan);
536}
537
538static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
539{
540 return container_of(vd, struct gpi_desc, vd);
541}
542
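/* convert a kernel virtual ring address to its physical counterpart */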
543static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
544 void *addr)
545{
546 return ring->phys_addr + (addr - ring->base);
547}
548
549static inline void *to_virtual(const struct gpi_ring *const ring,
550 phys_addr_t addr)
551{
552 return ring->base + (addr - ring->phys_addr);
553}
554
555#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
556static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
557{
558 u64 time = sched_clock();
559 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
560 u32 val;
561
562 val = readl_relaxed(addr);
563 index &= (GPI_DBG_LOG_SIZE - 1);
564 (gpii->dbg_log + index)->addr = addr;
565 (gpii->dbg_log + index)->time = time;
566 (gpii->dbg_log + index)->val = val;
567 (gpii->dbg_log + index)->read = true;
568 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
569 addr - gpii->regs, val);
570 return val;
571}
572static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
573{
574 u64 time = sched_clock();
575 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
576
577 index &= (GPI_DBG_LOG_SIZE - 1);
578 (gpii->dbg_log + index)->addr = addr;
579 (gpii->dbg_log + index)->time = time;
580 (gpii->dbg_log + index)->val = val;
581 (gpii->dbg_log + index)->read = false;
582
583 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
584 addr - gpii->regs, val);
585 writel_relaxed(val, addr);
586}
587#else
588static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
589{
590 u32 val = readl_relaxed(addr);
591
592 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
593 addr - gpii->regs, val);
594 return val;
595}
596static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
597{
598 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
599 addr - gpii->regs, val);
600 writel_relaxed(val, addr);
601}
602#endif
603
604/* gpi_write_reg_field - write to specific bit field */
605static inline void gpi_write_reg_field(struct gpii *gpii,
606 void __iomem *addr,
607 u32 mask,
608 u32 shift,
609 u32 val)
610{
611 u32 tmp = gpi_read_reg(gpii, addr);
612
613 tmp &= ~mask;
614 val = tmp | ((val << shift) & mask);
615 gpi_write_reg(gpii, addr, val);
616}
617
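/* mask all gpii interrupt sources and free the requested IRQ */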
618static void gpi_disable_interrupts(struct gpii *gpii)
619{
620 struct {
621 u32 offset;
622 u32 mask;
623 u32 shift;
624 u32 val;
625 } default_reg[] = {
626 {
627 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
628 (gpii->gpii_id),
629 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
630 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
631 0,
632 },
633 {
634 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
635 (gpii->gpii_id),
636 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
637 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
638 0,
639 },
640 {
641 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
642 (gpii->gpii_id),
643 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
644 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
645 0,
646 },
647 {
648 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
649 (gpii->gpii_id),
650 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
651 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
652 0,
653 },
654 {
655 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
656 (gpii->gpii_id),
657 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
658 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
659 0,
660 },
661 {
662 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
663 (gpii->gpii_id),
664 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
665 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
666 0,
667 },
668 {
669 GPI_GPII_n_CNTXT_INTSET_OFFS
670 (gpii->gpii_id),
671 GPI_GPII_n_CNTXT_INTSET_BMSK,
672 GPI_GPII_n_CNTXT_INTSET_SHFT,
673 0,
674 },
675 { 0 },
676 };
677 int i;
678
679 for (i = 0; default_reg[i].offset; i++)
680 gpi_write_reg_field(gpii, gpii->regs +
681 default_reg[i].offset,
682 default_reg[i].mask,
683 default_reg[i].shift,
684 default_reg[i].val);
685 gpii->cntxt_type_irq_msk = 0;
686 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
687 gpii->configured_irq = false;
688}
689
690/* configure and enable interrupts */
691static int gpi_config_interrupts(struct gpii *gpii,
692 enum gpii_irq_settings settings,
693 bool mask)
694{
695 int ret;
696 int i;
697 const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
698 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
699 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
700 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
701 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
702 struct {
703 u32 offset;
704 u32 mask;
705 u32 shift;
706 u32 val;
707 } default_reg[] = {
708 {
709 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
710 (gpii->gpii_id),
711 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
712 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
713 def_type,
714 },
715 {
716 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
717 (gpii->gpii_id),
718 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
719 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
720 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
721 },
722 {
723 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
724 (gpii->gpii_id),
725 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
726 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
727 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
728 },
729 {
730 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
731 (gpii->gpii_id),
732 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
733 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
734 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
735 },
736 {
737 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
738 (gpii->gpii_id),
739 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
740 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
741 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
742 },
743 {
744 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
745 (gpii->gpii_id),
746 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
747 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
748 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
749 },
750 {
751 GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
752 (gpii->gpii_id),
753 U32_MAX,
754 0,
755 0x0,
756 },
757 {
758 GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
759 (gpii->gpii_id),
760 U32_MAX,
761 0,
762 0x0,
763 },
764 {
765 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
766 (gpii->gpii_id),
767 U32_MAX,
768 0,
769 0x0,
770 },
771 {
772 GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
773 (gpii->gpii_id),
774 U32_MAX,
775 0,
776 0x0,
777 },
778 {
779 GPI_GPII_n_CNTXT_INTSET_OFFS
780 (gpii->gpii_id),
781 GPI_GPII_n_CNTXT_INTSET_BMSK,
782 GPI_GPII_n_CNTXT_INTSET_SHFT,
783 0x01,
784 },
785 {
786 GPI_GPII_n_ERROR_LOG_OFFS
787 (gpii->gpii_id),
788 U32_MAX,
789 0,
790 0x00,
791 },
792 { 0 },
793 };
794
795 GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
796 (gpii->configured_irq) ? 'F' : 'T',
797 (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
798 (mask) ? 'T' : 'F');
799
800 if (gpii->configured_irq == false) {
801 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
802 gpi_handle_irq, IRQF_TRIGGER_HIGH,
803 gpii->label, gpii);
804 if (ret < 0) {
805 GPII_CRITIC(gpii, GPI_DBG_COMMON,
806 "error request irq:%d ret:%d\n",
807 gpii->irq, ret);
808 return ret;
809 }
810 }
811
812 if (settings == MASK_IEOB_SETTINGS) {
813 /*
814 * GPII only uses one EV ring per gpii so we can globally
815 * enable/disable IEOB interrupt
816 */
817 if (mask)
818 gpii->cntxt_type_irq_msk |=
819 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
820 else
821 gpii->cntxt_type_irq_msk &=
822 ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
823 gpi_write_reg_field(gpii, gpii->regs +
824 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
825 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
826 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
827 gpii->cntxt_type_irq_msk);
828 } else {
829 for (i = 0; default_reg[i].offset; i++)
830 gpi_write_reg_field(gpii, gpii->regs +
831 default_reg[i].offset,
832 default_reg[i].mask,
833 default_reg[i].shift,
834 default_reg[i].val);
835 gpii->cntxt_type_irq_msk = def_type;
	}
837
838 gpii->configured_irq = true;
839
840 return 0;
841}
842
843/* Sends gpii event or channel command */
844static int gpi_send_cmd(struct gpii *gpii,
845 struct gpii_chan *gpii_chan,
846 enum gpi_cmd gpi_cmd)
847{
848 u32 chid = MAX_CHANNELS_PER_GPII;
849 u32 cmd;
850 unsigned long timeout;
851 void __iomem *cmd_reg;
852
853 if (gpi_cmd >= GPI_MAX_CMD)
854 return -EINVAL;
855 if (IS_CHAN_CMD(gpi_cmd))
856 chid = gpii_chan->chid;
857
858 GPII_INFO(gpii, chid,
859 "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
860
861 /* send opcode and wait for completion */
862 reinit_completion(&gpii->cmd_completion);
863 gpii->gpi_cmd = gpi_cmd;
864
865 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
866 gpii->ev_cmd_reg;
867 cmd = IS_CHAN_CMD(gpi_cmd) ?
868 GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
869 GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
870 gpi_write_reg(gpii, cmd_reg, cmd);
871 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
872 msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
873
874 if (!timeout) {
875 GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
876 TO_GPI_CMD_STR(gpi_cmd));
877 return -EIO;
878 }
879
	/* if the cmd is a state change cmd, confirm the new ch state is correct */
881 if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
882 return 0;
883 if (IS_CHAN_CMD(gpi_cmd) &&
884 gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
885 return 0;
886 if (!IS_CHAN_CMD(gpi_cmd) &&
887 gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
888 return 0;
889
890 return -EIO;
891}
892
893/* program transfer ring DB register */
894static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
895 struct gpi_ring *ring,
896 void *wp)
897{
898 struct gpii *gpii = gpii_chan->gpii;
899 phys_addr_t p_wp;
900
901 p_wp = to_physical(ring, wp);
902 gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
903}
904
905/* program event ring DB register */
906static inline void gpi_write_ev_db(struct gpii *gpii,
907 struct gpi_ring *ring,
908 void *wp)
909{
910 phys_addr_t p_wp;
911
912 p_wp = ring->phys_addr + (wp - ring->base);
913 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
914}
915
916/* notify client with generic event */
917static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
918 enum msm_gpi_cb_event event,
919 u64 status)
920{
921 struct gpii *gpii = gpii_chan->gpii;
922 struct gpi_client_info *client_info = &gpii_chan->client_info;
923 struct msm_gpi_cb msm_gpi_cb = {0};
924
925 GPII_ERR(gpii, gpii_chan->chid,
926 "notifying event:%s with status:%llu\n",
927 TO_GPI_CB_EVENT_STR(event), status);
928
929 msm_gpi_cb.cb_event = event;
930 msm_gpi_cb.status = status;
931 msm_gpi_cb.timestamp = sched_clock();
932 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
933 client_info->cb_param);
934}
935
936/* process transfer completion interrupt */
937static void gpi_process_ieob(struct gpii *gpii)
938{
939 u32 ieob_irq;
940
941 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
942 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
943 GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
944
945 /* process events based on priority */
946 if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
947 GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
948 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
949 tasklet_schedule(&gpii->ev_task);
950 } else {
951 GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
952 gpi_process_events(gpii);
953 }
954}
955
956/* process channel control interrupt */
957static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
958{
959 u32 gpii_id = gpii->gpii_id;
960 u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
961 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
962 u32 chid;
963 struct gpii_chan *gpii_chan;
964 u32 state;
965
966 /* clear the status */
967 offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
968 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
969
970 for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
971 if (!(BIT(chid) & ch_irq))
972 continue;
973
974 gpii_chan = &gpii->gpii_chan[chid];
975 GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
976 state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
977 CNTXT_0_CONFIG);
978 state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
979 GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
980
		/*
		 * The CH_CMD_DE_ALLOC cmd always succeeds, but it does not
		 * change the hardware channel state, so overwrite the
		 * software state with the default state.
		 */
986 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
987 state = DEFAULT_CH_STATE;
988 gpii_chan->ch_state = state;
989 GPII_VERB(gpii, chid, "setting channel to state:%s\n",
990 TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
991
		/*
		 * Trigger complete_all unless ch_state is STOP_IN_PROC.
		 * STOP_IN_PROC is a transitional state; wait for the stop
		 * interrupt before notifying.
		 */
997 if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
998 complete_all(&gpii->cmd_completion);
999
1000 /* notifying clients if in error state */
1001 if (gpii_chan->ch_state == CH_STATE_ERROR)
1002 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
1003 __LINE__);
1004 }
1005}
1006
1007/* processing gpi level error interrupts */
1008static void gpi_process_glob_err_irq(struct gpii *gpii)
1009{
1010 u32 gpii_id = gpii->gpii_id;
1011 u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
1012 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
1013 u32 error_log;
1014 u32 chid;
1015 struct gpii_chan *gpii_chan;
1016 struct gpi_client_info *client_info;
1017 struct msm_gpi_cb msm_gpi_cb;
1018 struct gpi_error_log_entry *log_entry =
1019 (struct gpi_error_log_entry *)&error_log;
1020
1021 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
1022 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
1023
1024 /* only error interrupt should be set */
1025 if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
1026 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
1027 irq_stts);
1028 goto error_irq;
1029 }
1030
1031 offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
1032 error_log = gpi_read_reg(gpii, gpii->regs + offset);
1033 gpi_write_reg(gpii, gpii->regs + offset, 0);
1034
1035 /* get channel info */
1036 chid = ((struct gpi_error_log_entry *)&error_log)->chid;
1037 if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
1038 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
1039 chid);
1040 goto error_irq;
1041 }
1042
1043 gpii_chan = &gpii->gpii_chan[chid];
1044 client_info = &gpii_chan->client_info;
1045
1046 /* notify client with error log */
1047 msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
1048 msm_gpi_cb.error_log.routine = log_entry->routine;
1049 msm_gpi_cb.error_log.type = log_entry->type;
1050 msm_gpi_cb.error_log.error_code = log_entry->code;
1051 GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
1052 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1053 GPII_ERR(gpii, gpii_chan->chid,
1054 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
1055 log_entry->ee, log_entry->chtype,
1056 msm_gpi_cb.error_log.routine,
1057 msm_gpi_cb.error_log.type,
1058 msm_gpi_cb.error_log.error_code);
1059 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1060 client_info->cb_param);
1061
1062 return;
1063
1064error_irq:
1065 for (chid = 0, gpii_chan = gpii->gpii_chan;
1066 chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
1067 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
1068 irq_stts);
1069}
1070
1071/* gpii interrupt handler */
1072static irqreturn_t gpi_handle_irq(int irq, void *data)
1073{
1074 struct gpii *gpii = data;
1075 u32 type;
1076 unsigned long flags;
1077 u32 offset;
1078 u32 gpii_id = gpii->gpii_id;
1079
1080 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1081
1082 read_lock_irqsave(&gpii->pm_lock, flags);
1083
	/*
	 * States are out of sync if an interrupt arrives while the
	 * software state does not allow register access; bail out.
	 */
1088 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1089 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1090 "receive interrupt while in %s state\n",
1091 TO_GPI_PM_STR(gpii->pm_state));
1092 goto exit_irq;
1093 }
1094
1095 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1096 type = gpi_read_reg(gpii, gpii->regs + offset);
1097
1098 do {
1099 GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
1100 type);
1101 /* global gpii error */
1102 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
1103 GPII_ERR(gpii, GPI_DBG_COMMON,
1104 "processing global error irq\n");
1105 gpi_process_glob_err_irq(gpii);
1106 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
1107 }
1108
1109 /* event control irq */
1110 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
1111 u32 ev_state;
1112 u32 ev_ch_irq;
1113
1114 GPII_INFO(gpii, GPI_DBG_COMMON,
1115 "processing EV CTRL interrupt\n");
1116 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
1117 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
1118
1119 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
1120 (gpii_id);
1121 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
1122 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
1123 CNTXT_0_CONFIG);
1124 ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
1125 ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
1126
			/*
			 * The EV_CMD_DEALLOC cmd always succeeds, but it does
			 * not change the hardware event ring state, so
			 * overwrite the software state with the default state.
			 */
1132 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
1133 ev_state = DEFAULT_EV_CH_STATE;
1134
1135 gpii->ev_state = ev_state;
1136 GPII_INFO(gpii, GPI_DBG_COMMON,
1137 "setting EV state to %s\n",
1138 TO_GPI_EV_STATE_STR(gpii->ev_state));
1139 complete_all(&gpii->cmd_completion);
1140 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
1141 }
1142
1143 /* channel control irq */
1144 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
1145 GPII_INFO(gpii, GPI_DBG_COMMON,
1146 "process CH CTRL interrupts\n");
1147 gpi_process_ch_ctrl_irq(gpii);
1148 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
1149 }
1150
1151 /* transfer complete interrupt */
1152 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
1153 GPII_VERB(gpii, GPI_DBG_COMMON,
1154 "process IEOB interrupts\n");
1155 gpi_process_ieob(gpii);
1156 type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
1157 }
1158
1159 if (type) {
1160 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1161 "Unhandled interrupt status:0x%x\n", type);
1162 goto exit_irq;
1163 }
1164 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1165 type = gpi_read_reg(gpii, gpii->regs + offset);
1166 } while (type);
1167
1168exit_irq:
1169 read_unlock_irqrestore(&gpii->pm_lock, flags);
1170 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1171
1172 return IRQ_HANDLED;
1173}
1174
1175/* process qup notification events */
1176static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
1177 struct qup_notif_event *notif_event)
1178{
1179 struct gpii *gpii = gpii_chan->gpii;
1180 struct gpi_client_info *client_info = &gpii_chan->client_info;
1181 struct msm_gpi_cb msm_gpi_cb;
1182
1183 GPII_VERB(gpii, gpii_chan->chid,
1184 "status:0x%x time:0x%x count:0x%x\n",
1185 notif_event->status, notif_event->time, notif_event->count);
1186
1187 msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
1188 msm_gpi_cb.status = notif_event->status;
1189 msm_gpi_cb.timestamp = notif_event->time;
1190 msm_gpi_cb.count = notif_event->count;
1191 GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
1192 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1193 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1194 client_info->cb_param);
1195}
1196
1197/* process DMA Immediate completion data events */
1198static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
1199 struct immediate_data_event *imed_event)
1200{
1201 struct gpii *gpii = gpii_chan->gpii;
1202 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1203 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1204 struct virt_dma_desc *vd;
1205 struct gpi_desc *gpi_desc;
1206 struct msm_gpi_tre *client_tre;
1207 void *sg_tre;
1208 void *tre = ch_ring->base +
1209 (ch_ring->el_size * imed_event->tre_index);
1210 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
	unsigned long flags;

	/*
	 * If the channel is not active, don't process the event; just
	 * let the client know a pending event is available.
	 */
1217 if (gpii_chan->pm_state != ACTIVE_STATE) {
1218 GPII_ERR(gpii, gpii_chan->chid,
1219 "skipping processing event because ch @ %s state\n",
1220 TO_GPI_PM_STR(gpii_chan->pm_state));
1221 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1222 __LINE__);
1223 return;
1224 }
1225
	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	vd = vchan_next_desc(&gpii_chan->vc);
1228 if (!vd) {
1229 struct gpi_ere *gpi_ere;
1230 struct msm_gpi_tre *gpi_tre;
1231
		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		GPII_ERR(gpii, gpii_chan->chid,
1234 "event without a pending descriptor!\n");
1235 gpi_ere = (struct gpi_ere *)imed_event;
1236 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1237 gpi_ere->dword[0], gpi_ere->dword[1],
1238 gpi_ere->dword[2], gpi_ere->dword[3]);
1239 gpi_tre = tre;
1240 GPII_ERR(gpii, gpii_chan->chid,
1241 "Pending TRE: %08x %08x %08x %08x\n",
1242 gpi_tre->dword[0], gpi_tre->dword[1],
1243 gpi_tre->dword[2], gpi_tre->dword[3]);
1244 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1245 __LINE__);
1246 return;
1247 }
1248 gpi_desc = to_gpi_desc(vd);
1249
	/* the TRE RP reported by the event doesn't match the descriptor's TRE */
	if (gpi_desc->wp != tre) {
		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		GPII_ERR(gpii, gpii_chan->chid,
1254 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1255 to_physical(ch_ring, gpi_desc->wp),
1256 to_physical(ch_ring, tre));
1257 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1258 __LINE__);
1259 return;
1260 }
1261
1262 list_del(&vd->node);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);

1265 sg_tre = gpi_desc->sg_tre;
1266 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1267
	/*
	 * The RP reported by the event points to the last TRE processed;
	 * advance the ring rp to tre + 1.
	 */
1272 tre += ch_ring->el_size;
1273 if (tre >= (ch_ring->base + ch_ring->len))
1274 tre = ch_ring->base;
1275 ch_ring->rp = tre;
1276 sg_tre += sg_ring->el_size;
1277 if (sg_tre >= (sg_ring->base + sg_ring->len))
1278 sg_tre = sg_ring->base;
1279 sg_ring->rp = sg_tre;
1280
1281 /* make sure rp updates are immediately visible to all cores */
1282 smp_wmb();
1283
1284 /* update Immediate data from Event back in to TRE if it's RX channel */
1285 if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
1286 client_tre->dword[0] =
1287 ((struct msm_gpi_tre *)imed_event)->dword[0];
1288 client_tre->dword[1] =
1289 ((struct msm_gpi_tre *)imed_event)->dword[1];
1290 client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
1291 imed_event->length);
1292 }
1293
1294 tx_cb_param = vd->tx.callback_param;
1295 if (tx_cb_param) {
1296 GPII_VERB(gpii, gpii_chan->chid,
1297 "cb_length:%u compl_code:0x%x status:0x%x\n",
1298 imed_event->length, imed_event->code,
1299 imed_event->status);
1300 tx_cb_param->length = imed_event->length;
1301 tx_cb_param->completion_code = imed_event->code;
1302 tx_cb_param->status = imed_event->status;
1303 }
1304
	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	vchan_cookie_complete(vd);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
}
1309
1310/* processing transfer completion events */
1311static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
1312 struct xfer_compl_event *compl_event)
1313{
1314 struct gpii *gpii = gpii_chan->gpii;
1315 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1316 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1317 void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
1318 struct msm_gpi_tre *client_tre;
1319 struct virt_dma_desc *vd;
1320 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1321 struct gpi_desc *gpi_desc;
1322 void *sg_tre = NULL;
	unsigned long flags;

1325 /* only process events on active channel */
1326 if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
1327 GPII_ERR(gpii, gpii_chan->chid,
1328 "skipping processing event because ch @ %s state\n",
1329 TO_GPI_PM_STR(gpii_chan->pm_state));
1330 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1331 __LINE__);
1332 return;
1333 }
1334
	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	vd = vchan_next_desc(&gpii_chan->vc);
1337 if (!vd) {
1338 struct gpi_ere *gpi_ere;
1339
		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		GPII_ERR(gpii, gpii_chan->chid,
1342 "Event without a pending descriptor!\n");
1343 gpi_ere = (struct gpi_ere *)compl_event;
1344 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1345 gpi_ere->dword[0], gpi_ere->dword[1],
1346 gpi_ere->dword[2], gpi_ere->dword[3]);
1347 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1348 __LINE__);
1349 return;
1350 }
1351
1352 gpi_desc = to_gpi_desc(vd);
1353
	/* the TRE the event was generated for didn't match the descriptor's TRE */
	if (gpi_desc->wp != ev_rp) {
		spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
		GPII_ERR(gpii, gpii_chan->chid,
			 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1359 to_physical(ch_ring, gpi_desc->wp),
1360 to_physical(ch_ring, ev_rp));
1361 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1362 __LINE__);
1363 return;
1364 }
1365
1366 list_del(&vd->node);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);

1369 sg_tre = gpi_desc->sg_tre;
1370 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1371
	/*
	 * The RP reported by the event points to the last TRE processed;
	 * advance the ring rp to ev_rp + 1.
	 */
1376 ev_rp += ch_ring->el_size;
1377 if (ev_rp >= (ch_ring->base + ch_ring->len))
1378 ev_rp = ch_ring->base;
1379 ch_ring->rp = ev_rp;
1380 sg_tre += sg_ring->el_size;
1381 if (sg_tre >= (sg_ring->base + sg_ring->len))
1382 sg_tre = sg_ring->base;
1383 sg_ring->rp = sg_tre;
1384
1385 /* update must be visible to other cores */
1386 smp_wmb();
1387
1388 tx_cb_param = vd->tx.callback_param;
1389 if (tx_cb_param) {
1390 GPII_VERB(gpii, gpii_chan->chid,
1391 "cb_length:%u compl_code:0x%x status:0x%x\n",
1392 compl_event->length, compl_event->code,
1393 compl_event->status);
1394 tx_cb_param->length = compl_event->length;
1395 tx_cb_param->completion_code = compl_event->code;
1396 tx_cb_param->status = compl_event->status;
1397 }
1398
	spin_lock_irqsave(&gpii_chan->vc.lock, flags);
	vchan_cookie_complete(vd);
	spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
}
1403
1404/* process all events */
1405static void gpi_process_events(struct gpii *gpii)
1406{
1407 struct gpi_ring *ev_ring = &gpii->ev_ring;
1408 u32 cntxt_rp, local_rp;
1409 union gpi_event *gpi_event;
1410 struct gpii_chan *gpii_chan;
1411 u32 chid, type;
1412 u32 ieob_irq;
1413
1414 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1415 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1416
	GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp:0x%08x local_rp:0x%08x\n",
1418 cntxt_rp, local_rp);
1419
1420 do {
1421 while (local_rp != cntxt_rp) {
1422 gpi_event = ev_ring->rp;
1423 chid = gpi_event->xfer_compl_event.chid;
1424 type = gpi_event->xfer_compl_event.type;
1425 GPII_VERB(gpii, GPI_DBG_COMMON,
1426 "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
1427 local_rp, chid, type,
1428 gpi_event->gpi_ere.dword[0],
1429 gpi_event->gpi_ere.dword[1],
1430 gpi_event->gpi_ere.dword[2],
1431 gpi_event->gpi_ere.dword[3]);
1432
1433 switch (type) {
1434 case XFER_COMPLETE_EV_TYPE:
1435 gpii_chan = &gpii->gpii_chan[chid];
1436 gpi_process_xfer_compl_event(gpii_chan,
1437 &gpi_event->xfer_compl_event);
1438 break;
1439 case STALE_EV_TYPE:
1440 GPII_VERB(gpii, GPI_DBG_COMMON,
1441 "stale event, not processing\n");
1442 break;
1443 case IMMEDIATE_DATA_EV_TYPE:
1444 gpii_chan = &gpii->gpii_chan[chid];
1445 gpi_process_imed_data_event(gpii_chan,
1446 &gpi_event->immediate_data_event);
1447 break;
1448 case QUP_NOTIF_EV_TYPE:
1449 gpii_chan = &gpii->gpii_chan[chid];
1450 gpi_process_qup_notif_event(gpii_chan,
1451 &gpi_event->qup_notif_event);
1452 break;
1453 default:
1454 GPII_VERB(gpii, GPI_DBG_COMMON,
1455 "not supported event type:0x%x\n",
1456 type);
1457 }
1458 gpi_ring_recycle_ev_element(ev_ring);
1459 local_rp = (u32)to_physical(ev_ring,
1460 (void *)ev_ring->rp);
1461 }
1462 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1463
1464 /* clear pending IEOB events */
1465 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
1466 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
1467
1468 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1469 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1470
1471 } while (cntxt_rp != local_rp);
1472
1473 GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
1474 local_rp);
1475}
1476
1477/* processing events using tasklet */
1478static void gpi_ev_tasklet(unsigned long data)
1479{
1480 struct gpii *gpii = (struct gpii *)data;
1481
1482 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1483
1484 read_lock_bh(&gpii->pm_lock);
1485 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1486 read_unlock_bh(&gpii->pm_lock);
1487 GPII_ERR(gpii, GPI_DBG_COMMON,
1488 "not processing any events, pm_state:%s\n",
1489 TO_GPI_PM_STR(gpii->pm_state));
1490 return;
1491 }
1492
1493 /* process the events */
1494 gpi_process_events(gpii);
1495
1496 /* enable IEOB, switching back to interrupts */
1497 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1498 read_unlock_bh(&gpii->pm_lock);
1499
1500 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1501}
1502
1503/* marks all pending events for the channel as stale */
1504void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
1505{
1506 struct gpii *gpii = gpii_chan->gpii;
1507 struct gpi_ring *ev_ring = &gpii->ev_ring;
1508 void *ev_rp;
1509 u32 cntxt_rp, local_rp;
1510
1511 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1512 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1513
1514 ev_rp = ev_ring->rp;
1515 local_rp = (u32)to_physical(ev_ring, ev_rp);
1516 while (local_rp != cntxt_rp) {
1517 union gpi_event *gpi_event = ev_rp;
1518 u32 chid = gpi_event->xfer_compl_event.chid;
1519
1520 if (chid == gpii_chan->chid)
1521 gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1522 ev_rp += ev_ring->el_size;
1523 if (ev_rp >= (ev_ring->base + ev_ring->len))
1524 ev_rp = ev_ring->base;
1525 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1526 local_rp = (u32)to_physical(ev_ring, ev_rp);
1527 }
1528}
1529
1530/* reset sw state and issue channel reset or de-alloc */
1531static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
1532{
1533 struct gpii *gpii = gpii_chan->gpii;
1534 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1535 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1536 unsigned long flags;
1537 LIST_HEAD(list);
1538 int ret;
1539
1540 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1541 ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
1542 if (ret) {
1543 GPII_ERR(gpii, gpii_chan->chid,
1544 "Error with cmd:%s ret:%d\n",
1545 TO_GPI_CMD_STR(gpi_cmd), ret);
1546 return ret;
1547 }
1548
1549 /* initialize the local ring ptrs */
1550 ch_ring->rp = ch_ring->base;
1551 ch_ring->wp = ch_ring->base;
1552 sg_ring->rp = sg_ring->base;
1553 sg_ring->wp = sg_ring->base;
1554
1555 /* visible to other cores */
1556 smp_wmb();
1557
1558 /* check event ring for any stale events */
1559 write_lock_irq(&gpii->pm_lock);
1560 gpi_mark_stale_events(gpii_chan);
1561
1562 /* remove all async descriptors */
1563 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1564 vchan_get_all_descriptors(&gpii_chan->vc, &list);
1565 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1566 write_unlock_irq(&gpii->pm_lock);
1567 vchan_dma_desc_free_list(&gpii_chan->vc, &list);
1568
1569 return 0;
1570}
1571
1572static int gpi_start_chan(struct gpii_chan *gpii_chan)
1573{
1574 struct gpii *gpii = gpii_chan->gpii;
1575 int ret;
1576
1577 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1578
1579 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
1580 if (ret) {
1581 GPII_ERR(gpii, gpii_chan->chid,
1582 "Error with cmd:%s ret:%d\n",
1583 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1584 return ret;
1585 }
1586
1587 /* gpii CH is active now */
1588 write_lock_irq(&gpii->pm_lock);
1589 gpii_chan->pm_state = ACTIVE_STATE;
1590 write_unlock_irq(&gpii->pm_lock);
1591
1592 return 0;
1593}
1594
1595/* allocate and configure the transfer channel */
1596static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
1597{
1598 struct gpii *gpii = gpii_chan->gpii;
1599 struct gpi_ring *ring = &gpii_chan->ch_ring;
1600 int i;
1601 int ret;
1602 struct {
1603 void *base;
1604 int offset;
1605 u32 val;
1606 } ch_reg[] = {
1607 {
1608 gpii_chan->ch_cntxt_base_reg,
1609 CNTXT_0_CONFIG,
1610 GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
1611 gpii_chan->dir,
1612 GPI_CHTYPE_PROTO_GPI),
1613 },
1614 {
1615 gpii_chan->ch_cntxt_base_reg,
1616 CNTXT_1_R_LENGTH,
1617 ring->len,
1618 },
1619 {
1620 gpii_chan->ch_cntxt_base_reg,
1621 CNTXT_2_RING_BASE_LSB,
1622 (u32)ring->phys_addr,
1623 },
1624 {
1625 gpii_chan->ch_cntxt_base_reg,
1626 CNTXT_3_RING_BASE_MSB,
1627 (u32)(ring->phys_addr >> 32),
1628 },
1629 { /* program MSB of DB register with ring base */
1630 gpii_chan->ch_cntxt_db_reg,
1631 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1632 (u32)(ring->phys_addr >> 32),
1633 },
1634 {
1635 gpii->regs,
1636 GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
1637 gpii_chan->chid),
1638 GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
1639 gpii_chan->protocol,
1640 gpii_chan->seid),
1641 },
1642 {
1643 gpii->regs,
1644 GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
1645 gpii_chan->chid),
1646 0,
1647 },
1648 {
1649 gpii->regs,
1650 GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
1651 gpii_chan->chid),
1652 0,
1653 },
1654 {
1655 gpii->regs,
1656 GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
1657 gpii_chan->chid),
1658 0,
1659 },
1660 {
1661 gpii->regs,
1662 GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
1663 gpii_chan->chid),
1664 1,
1665 },
1666 { NULL },
1667 };
1668
1669 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1670
1671 if (send_alloc_cmd) {
1672 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
1673 if (ret) {
1674 GPII_ERR(gpii, gpii_chan->chid,
1675 "Error with cmd:%s ret:%d\n",
1676 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1677 return ret;
1678 }
1679 }
1680
1681 /* program channel cntxt registers */
1682 for (i = 0; ch_reg[i].base; i++)
1683 gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
1684 ch_reg[i].val);
1685 /* flush all the writes */
1686 wmb();
1687 return 0;
1688}
1689
1690/* allocate and configure event ring */
1691static int gpi_alloc_ev_chan(struct gpii *gpii)
1692{
1693 struct gpi_ring *ring = &gpii->ev_ring;
1694 int i;
1695 int ret;
1696 struct {
1697 void *base;
1698 int offset;
1699 u32 val;
1700 } ev_reg[] = {
1701 {
1702 gpii->ev_cntxt_base_reg,
1703 CNTXT_0_CONFIG,
1704 GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
1705 GPI_INTTYPE_IRQ,
1706 GPI_CHTYPE_GPI_EV),
1707 },
1708 {
1709 gpii->ev_cntxt_base_reg,
1710 CNTXT_1_R_LENGTH,
1711 ring->len,
1712 },
1713 {
1714 gpii->ev_cntxt_base_reg,
1715 CNTXT_2_RING_BASE_LSB,
1716 (u32)ring->phys_addr,
1717 },
1718 {
1719 gpii->ev_cntxt_base_reg,
1720 CNTXT_3_RING_BASE_MSB,
1721 (u32)(ring->phys_addr >> 32),
1722 },
1723 {
1724 /* program db msg with ring base msb */
1725 gpii->ev_cntxt_db_reg,
1726 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1727 (u32)(ring->phys_addr >> 32),
1728 },
1729 {
1730 gpii->ev_cntxt_base_reg,
1731 CNTXT_8_RING_INT_MOD,
1732 0,
1733 },
1734 {
1735 gpii->ev_cntxt_base_reg,
1736 CNTXT_10_RING_MSI_LSB,
1737 0,
1738 },
1739 {
1740 gpii->ev_cntxt_base_reg,
1741 CNTXT_11_RING_MSI_MSB,
1742 0,
1743 },
1744 {
1745 gpii->ev_cntxt_base_reg,
1746 CNTXT_8_RING_INT_MOD,
1747 0,
1748 },
1749 {
1750 gpii->ev_cntxt_base_reg,
1751 CNTXT_12_RING_RP_UPDATE_LSB,
1752 0,
1753 },
1754 {
1755 gpii->ev_cntxt_base_reg,
1756 CNTXT_13_RING_RP_UPDATE_MSB,
1757 0,
1758 },
1759 { NULL },
1760 };
1761
1762 GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
1763
1764 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1765 if (ret) {
1766 GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
1767 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1768 return ret;
1769 }
1770
1771 /* program event context */
1772 for (i = 0; ev_reg[i].base; i++)
1773 gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
1774 ev_reg[i].val);
1775
1776 /* add events to ring */
1777 ring->wp = (ring->base + ring->len - ring->el_size);
1778
1779 /* flush all the writes */
1780 wmb();
1781
1782 /* gpii is active now */
1783 write_lock_irq(&gpii->pm_lock);
1784 gpii->pm_state = ACTIVE_STATE;
1785 write_unlock_irq(&gpii->pm_lock);
1786 gpi_write_ev_db(gpii, ring, ring->wp);
1787
1788 return 0;
1789}
1790
1791/* calculate # of ERE/TRE available to queue */
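/* one slot is always left unused so that wp == rp unambiguously means "empty" */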
1792static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1793{
1794 int elements = 0;
1795
1796 if (ring->wp < ring->rp)
1797 elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1798 else {
1799 elements = (ring->rp - ring->base) / ring->el_size;
1800 elements += ((ring->base + ring->len - ring->wp) /
1801 ring->el_size) - 1;
1802 }
1803
1804 return elements;
1805}
1806
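/* reserve the next free element, return its location in *wp, and advance wp */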
1807static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1808{
1809
1810 if (gpi_ring_num_elements_avail(ring) <= 0)
1811 return -ENOMEM;
1812
1813 *wp = ring->wp;
1814 ring->wp += ring->el_size;
1815 if (ring->wp >= (ring->base + ring->len))
1816 ring->wp = ring->base;
1817
1818 /* visible to other cores */
1819 smp_wmb();
1820
1821 return 0;
1822}
1823
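/* return a consumed event element to hardware by advancing both rp and wp */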
1824static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1825{
1826 /* Update the WP */
1827 ring->wp += ring->el_size;
1828 if (ring->wp >= (ring->base + ring->len))
1829 ring->wp = ring->base;
1830
1831 /* Update the RP */
1832 ring->rp += ring->el_size;
1833 if (ring->rp >= (ring->base + ring->len))
1834 ring->rp = ring->base;
1835
1836 /* visible to other cores */
1837 smp_wmb();
1838}
1839
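/* free ring memory allocated by gpi_alloc_ring() and reset the bookkeeping */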
1840static void gpi_free_ring(struct gpi_ring *ring,
1841 struct gpii *gpii)
1842{
1843 if (ring->dma_handle)
1844 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1845 ring->pre_aligned, ring->dma_handle);
1846 else
1847 vfree(ring->pre_aligned);
1848 memset(ring, 0, sizeof(*ring));
1849}
1850
1851/* allocate memory for transfer and event rings */
1852static int gpi_alloc_ring(struct gpi_ring *ring,
1853 u32 elements,
1854 u32 el_size,
1855 struct gpii *gpii,
1856 bool alloc_coherent)
1857{
1858 u64 len = elements * el_size;
1859 int bit;
1860
1861 if (alloc_coherent) {
1862 /* ring len must be power of 2 */
1863 bit = find_last_bit((unsigned long *)&len, 32);
1864 if (((1 << bit) - 1) & len)
1865 bit++;
1866 len = 1 << bit;
1867 ring->alloc_size = (len + (len - 1));
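		/*
		 * over-allocate by (len - 1) bytes so a len-aligned window of
		 * len bytes is guaranteed to fit inside the allocation
		 */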
1868 GPII_INFO(gpii, GPI_DBG_COMMON,
1869 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
1870 elements, el_size, (elements * el_size), len,
1871 ring->alloc_size);
1872 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1873 ring->alloc_size,
1874 &ring->dma_handle,
1875 GFP_KERNEL);
1876 if (!ring->pre_aligned) {
1877 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1878 "could not alloc size:%lu mem for ring\n",
1879 ring->alloc_size);
1880 return -ENOMEM;
1881 }
1882
1883 /* align the physical mem */
1884 ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1885 ring->base = ring->pre_aligned +
1886 (ring->phys_addr - ring->dma_handle);
1887 } else {
1888 ring->pre_aligned = vmalloc(len);
1889 if (!ring->pre_aligned) {
1890 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1891 "could not allocsize:%llu mem for ring\n",
1892 len);
1893 return -ENOMEM;
1894 }
1895 ring->phys_addr = 0;
1896 ring->dma_handle = 0;
1897 ring->base = ring->pre_aligned;
1898 }
1899
1900 ring->rp = ring->base;
1901 ring->wp = ring->base;
1902 ring->len = len;
1903 ring->el_size = el_size;
1904 ring->elements = ring->len / ring->el_size;
1905 memset(ring->base, 0, ring->len);
1906 ring->configured = true;
1907
1908 /* update to other cores */
1909 smp_wmb();
1910
1911 GPII_INFO(gpii, GPI_DBG_COMMON,
1912 "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
1913 ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
1914 ring->elements);
1915
1916 return 0;
1917}
1918
1919/* copy tre into transfer ring */
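/*
 * Each client TRE is copied into the hardware transfer ring, and a matching
 * entry is written into the software-only sg ring recording both the original
 * client TRE (ptr) and its location in the transfer ring (wp), so later
 * processing can map a completion back to the request.
 */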
1920static void gpi_queue_xfer(struct gpii *gpii,
1921 struct gpii_chan *gpii_chan,
1922 struct msm_gpi_tre *gpi_tre,
1923 void **wp,
1924 struct sg_tre **sg_tre)
1925{
1926 struct msm_gpi_tre *ch_tre;
1927 int ret;
1928
1929 /* get next tre location we can copy */
1930 ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
1931 if (unlikely(ret)) {
1932 GPII_CRITIC(gpii, gpii_chan->chid,
1933 "Error adding ring element to xfer ring\n");
1934 return;
1935 }
1936 /* get next sg tre location we can use */
1937 ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
1938 if (unlikely(ret)) {
1939 GPII_CRITIC(gpii, gpii_chan->chid,
1940 "Error adding ring element to sg ring\n");
1941 return;
1942 }
1943
1944 /* copy the tre info */
1945 memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
1946 (*sg_tre)->ptr = gpi_tre;
1947 (*sg_tre)->wp = ch_tre;
1948 *wp = ch_tre;
1949}
1950
1951/* reset and restart transfer channel */
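/*
 * terminate_all stops the channel(s), issues a reset to discard any pending
 * TREs, reprograms the channel context, and then restarts the channel(s) so
 * the hardware is left ready for new transfers. Except for UART, the TX and
 * RX channels of a gpii are handled together.
 */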
1952int gpi_terminate_all(struct dma_chan *chan)
1953{
1954 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1955 struct gpii *gpii = gpii_chan->gpii;
1956 int schid, echid, i;
1957 int ret = 0;
1958
1959 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1960 mutex_lock(&gpii->ctrl_lock);
1961
1962 /*
1963	 * treat both channels as a group unless the protocol is UART;
1964	 * STOP, RESET, and START need to be issued in lockstep
1965 */
1966 schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
1967 echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
1968 MAX_CHANNELS_PER_GPII;
1969
1970 /* stop the channel */
1971 for (i = schid; i < echid; i++) {
1972 gpii_chan = &gpii->gpii_chan[i];
1973
1974 /* disable ch state so no more TRE processing */
1975 write_lock_irq(&gpii->pm_lock);
1976 gpii_chan->pm_state = PREPARE_TERMINATE;
1977 write_unlock_irq(&gpii->pm_lock);
1978
1979 /* send command to Stop the channel */
1980 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
1981 if (ret)
1982 GPII_ERR(gpii, gpii_chan->chid,
1983 "Error Stopping Channel:%d resetting anyway\n",
1984 ret);
1985 }
1986
1987 /* reset the channels (clears any pending tre) */
1988 for (i = schid; i < echid; i++) {
1989 gpii_chan = &gpii->gpii_chan[i];
1990
1991 ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
1992 if (ret) {
1993 GPII_ERR(gpii, gpii_chan->chid,
1994 "Error resetting channel ret:%d\n", ret);
1995 goto terminate_exit;
1996 }
1997
1998 /* reprogram channel CNTXT */
1999 ret = gpi_alloc_chan(gpii_chan, false);
2000 if (ret) {
2001 GPII_ERR(gpii, gpii_chan->chid,
2002 "Error alloc_channel ret:%d\n", ret);
2003 goto terminate_exit;
2004 }
2005 }
2006
2007 /* restart the channels */
2008 for (i = schid; i < echid; i++) {
2009 gpii_chan = &gpii->gpii_chan[i];
2010
2011 ret = gpi_start_chan(gpii_chan);
2012 if (ret) {
2013 GPII_ERR(gpii, gpii_chan->chid,
2014 "Error Starting Channel ret:%d\n", ret);
2015 goto terminate_exit;
2016 }
2017 }
2018
2019terminate_exit:
2020 mutex_unlock(&gpii->ctrl_lock);
2021 return ret;
2022}
2023
2024/* pause dma transfer for all channels */
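/*
 * Pause is a gpii-wide operation: both channels are stopped, the gpii irq is
 * disabled and the event tasklet is drained; gpi_resume() undoes this by
 * re-enabling the irq and restarting the channels.
 */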
2025static int gpi_pause(struct dma_chan *chan)
2026{
2027 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2028 struct gpii *gpii = gpii_chan->gpii;
2029 int i, ret;
2030
2031 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
2032 mutex_lock(&gpii->ctrl_lock);
2033
2034 /*
2035 * pause/resume are per gpii not per channel, so
2036 * client needs to call pause only once
2037 */
2038 if (gpii->pm_state == PAUSE_STATE) {
2039 GPII_INFO(gpii, gpii_chan->chid,
2040 "channel is already paused\n");
2041 mutex_unlock(&gpii->ctrl_lock);
2042 return 0;
2043 }
2044
2045 /* send stop command to stop the channels */
2046 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2047 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2048 if (ret) {
2049 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2050 "Error stopping chan, ret:%d\n", ret);
2051 mutex_unlock(&gpii->ctrl_lock);
2052 return ret;
2053 }
2054 }
2055
2056 disable_irq(gpii->irq);
2057
2058	/* wait for in-flight event processing to complete */
2059 tasklet_kill(&gpii->ev_task);
2060
2061 write_lock_irq(&gpii->pm_lock);
2062 gpii->pm_state = PAUSE_STATE;
2063 write_unlock_irq(&gpii->pm_lock);
2064 mutex_unlock(&gpii->ctrl_lock);
2065
2066 return 0;
2067}
2068
2069/* resume dma transfer */
2070static int gpi_resume(struct dma_chan *chan)
2071{
2072 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2073 struct gpii *gpii = gpii_chan->gpii;
2074 int i;
2075 int ret;
2076
2077 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2078
2079 mutex_lock(&gpii->ctrl_lock);
2080 if (gpii->pm_state == ACTIVE_STATE) {
2081 GPII_INFO(gpii, gpii_chan->chid,
2082 "channel is already active\n");
2083 mutex_unlock(&gpii->ctrl_lock);
2084 return 0;
2085 }
2086
2087 enable_irq(gpii->irq);
2088
2089 /* send start command to start the channels */
2090 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2091 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
2092 if (ret) {
2093 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2094				 "Error starting chan, ret:%d\n", ret);
2095 mutex_unlock(&gpii->ctrl_lock);
2096 return ret;
2097 }
2098 }
2099
2100 write_lock_irq(&gpii->pm_lock);
2101 gpii->pm_state = ACTIVE_STATE;
2102 write_unlock_irq(&gpii->pm_lock);
2103 mutex_unlock(&gpii->ctrl_lock);
2104
2105 return 0;
2106}
2107
2108void gpi_desc_free(struct virt_dma_desc *vd)
2109{
2110 struct gpi_desc *gpi_desc = to_gpi_desc(vd);
2111
2112 kfree(gpi_desc);
2113}
2114
2115/* copy tre into transfer ring */
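/*
 * The scatterlist entries passed by the client do not describe raw data
 * buffers; each entry points at an array of pre-built msm_gpi_tre elements,
 * so sg->length is expected to be a multiple of the ring element size. The
 * TREs are copied verbatim into the transfer ring.
 */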
2116struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
2117 struct scatterlist *sgl,
2118 unsigned int sg_len,
2119 enum dma_transfer_direction direction,
2120 unsigned long flags,
2121 void *context)
2122{
2123 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2124 struct gpii *gpii = gpii_chan->gpii;
2125 u32 nr, sg_nr;
2126 u32 nr_req = 0;
2127 int i, j;
2128 struct scatterlist *sg;
2129 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
2130 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
2131 void *tre, *wp = NULL;
2132 struct sg_tre *sg_tre = NULL;
2133 const gfp_t gfp = GFP_ATOMIC;
2134 struct gpi_desc *gpi_desc;
2135
2136 GPII_VERB(gpii, gpii_chan->chid, "enter\n");
2137
2138 if (!is_slave_direction(direction)) {
2139 GPII_ERR(gpii, gpii_chan->chid,
2140 "invalid dma direction: %d\n", direction);
2141 return NULL;
2142 }
2143
2144 /* calculate # of elements required & available */
2145 nr = gpi_ring_num_elements_avail(ch_ring);
2146 sg_nr = gpi_ring_num_elements_avail(sg_ring);
2147 for_each_sg(sgl, sg, sg_len, i) {
2148 GPII_VERB(gpii, gpii_chan->chid,
2149 "%d of %u len:%u\n", i, sg_len, sg->length);
2150 nr_req += (sg->length / ch_ring->el_size);
2151 }
2152 GPII_VERB(gpii, gpii_chan->chid,
2153 "nr_elements_avail:%u sg_avail:%u required:%u\n",
2154 nr, sg_nr, nr_req);
2155
2156 if (nr < nr_req || sg_nr < nr_req) {
2157 GPII_ERR(gpii, gpii_chan->chid,
2158 "not enough space in ring, avail:%u,%u required:%u\n",
2159 nr, sg_nr, nr_req);
2160 return NULL;
2161 }
2162
2163 gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
2164 if (!gpi_desc) {
2165 GPII_ERR(gpii, gpii_chan->chid,
2166 "out of memory for descriptor\n");
2167 return NULL;
2168 }
2169
2170 /* copy each tre into transfer ring */
2171 for_each_sg(sgl, sg, sg_len, i)
2172 for (j = 0, tre = sg_virt(sg); j < sg->length;
2173 j += ch_ring->el_size, tre += ch_ring->el_size)
2174 gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);
2175
2176 /* set up the descriptor */
2177 gpi_desc->db = ch_ring->wp;
2178 gpi_desc->wp = wp;
2179 gpi_desc->sg_tre = sg_tre;
2180 gpi_desc->gpii_chan = gpii_chan;
2181 GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
2182 to_physical(ch_ring, ch_ring->wp),
2183 to_physical(ch_ring, ch_ring->rp));
2184
2185 return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
2186}
2187
2188/* rings the transfer ring doorbell to begin the transfer */
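/*
 * Only the doorbell value of the most recently issued descriptor needs to be
 * written: it points one element past the last queued TRE and therefore
 * covers every descriptor issued before it.
 */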
2189static void gpi_issue_pending(struct dma_chan *chan)
2190{
2191 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2192 struct gpii *gpii = gpii_chan->gpii;
2193 unsigned long flags, pm_lock_flags;
2194 struct virt_dma_desc *vd = NULL;
2195 struct gpi_desc *gpi_desc;
2196
2197 GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
2198
2199 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
2200
2201	/* move all submitted descriptors to the issued list */
2202 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
2203 if (vchan_issue_pending(&gpii_chan->vc))
2204 vd = list_last_entry(&gpii_chan->vc.desc_issued,
2205 struct virt_dma_desc, node);
2206 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
2207
2208	/* nothing to do, list is empty */
2209 if (!vd) {
2210 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2211 GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
2212 return;
2213 }
2214
2215 gpi_desc = to_gpi_desc(vd);
2216 gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
2217 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2218}
2219
2220/* configure or issue async command */
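/*
 * Control commands reach this driver through dmaengine_slave_config() with a
 * struct msm_gpi_ctrl placed in chan->private. A minimal, illustrative
 * client-side sketch (callback/context names are hypothetical):
 *
 *	struct dma_slave_config config = { };
 *	struct msm_gpi_ctrl ctrl = { .cmd = MSM_GPI_INIT };
 *
 *	ctrl.init.callback = my_ev_cb;
 *	ctrl.init.cb_param = my_ctx;
 *	chan->private = &ctrl;
 *	dmaengine_slave_config(chan, &config);
 *
 * MSM_GPI_INIT brings the whole gpii up (event ring, interrupts, both
 * channels); the UART_* commands forward one-shot commands to the hardware.
 */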
2221static int gpi_config(struct dma_chan *chan,
2222 struct dma_slave_config *config)
2223{
2224 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2225 struct gpii *gpii = gpii_chan->gpii;
2226 struct msm_gpi_ctrl *gpi_ctrl = chan->private;
2227 const int ev_factor = gpii->gpi_dev->ev_factor;
2228 u32 elements;
2229 int i = 0;
2230 int ret = 0;
2231
2232 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2233 if (!gpi_ctrl) {
2234 GPII_ERR(gpii, gpii_chan->chid,
2235			 "no config ctrl data provided\n");
2236 return -EINVAL;
2237 }
2238
2239 mutex_lock(&gpii->ctrl_lock);
2240
2241 switch (gpi_ctrl->cmd) {
2242 case MSM_GPI_INIT:
2243 GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
2244
2245 gpii_chan->client_info.callback = gpi_ctrl->init.callback;
2246 gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
2247 gpii_chan->pm_state = CONFIG_STATE;
2248
2249		/* check if both channels are configured before continuing */
2250 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2251 if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
2252 goto exit_gpi_init;
2253
2254 /* configure to highest priority from two channels */
2255 gpii->ev_priority = min(gpii->gpii_chan[0].priority,
2256 gpii->gpii_chan[1].priority);
2257
2258 /* protocol must be same for both channels */
2259 if (gpii->gpii_chan[0].protocol !=
2260 gpii->gpii_chan[1].protocol) {
2261 GPII_ERR(gpii, gpii_chan->chid,
2262 "protocol did not match protocol %u != %u\n",
2263 gpii->gpii_chan[0].protocol,
2264 gpii->gpii_chan[1].protocol);
2265 ret = -EINVAL;
2266 goto exit_gpi_init;
2267 }
2268 gpii->protocol = gpii_chan->protocol;
2269
2270 /* allocate memory for event ring */
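		/*
		 * The event ring is shared by both channels, so size it from
		 * the larger of the two requested TRE counts scaled by
		 * (1 << ev_factor) from DT, leaving headroom for completion
		 * events from either channel.
		 */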
2271 elements = max(gpii->gpii_chan[0].req_tres,
2272 gpii->gpii_chan[1].req_tres);
2273 ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
2274 sizeof(union gpi_event), gpii, true);
2275 if (ret) {
2276 GPII_ERR(gpii, gpii_chan->chid,
2277 "error allocating mem for ev ring\n");
2278 goto exit_gpi_init;
2279 }
2280
2281 /* configure interrupts */
2282 write_lock_irq(&gpii->pm_lock);
2283 gpii->pm_state = PREPARE_HARDWARE;
2284 write_unlock_irq(&gpii->pm_lock);
2285 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
2286 if (ret) {
2287 GPII_ERR(gpii, gpii_chan->chid,
2288 "error config. interrupts, ret:%d\n", ret);
2289 goto error_config_int;
2290 }
2291
2292 /* allocate event rings */
2293 ret = gpi_alloc_ev_chan(gpii);
2294 if (ret) {
2295 GPII_ERR(gpii, gpii_chan->chid,
2296 "error alloc_ev_chan:%d\n", ret);
2297 goto error_alloc_ev_ring;
2298 }
2299
2300 /* Allocate all channels */
2301 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2302 ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
2303 if (ret) {
2304 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2305 "Error allocating chan:%d\n", ret);
2306 goto error_alloc_chan;
2307 }
2308 }
2309
2310 /* start channels */
2311 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2312 ret = gpi_start_chan(&gpii->gpii_chan[i]);
2313 if (ret) {
2314 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2315 "Error start chan:%d\n", ret);
2316 goto error_start_chan;
2317 }
2318 }
2319
2320 break;
2321 case MSM_GPI_CMD_UART_SW_STALE:
2322 GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
2323 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
2324 break;
2325 case MSM_GPI_CMD_UART_RFR_READY:
2326 GPII_INFO(gpii, gpii_chan->chid,
2327 "sending UART RFR READY cmd\n");
2328 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
2329 break;
2330 case MSM_GPI_CMD_UART_RFR_NOT_READY:
2331 GPII_INFO(gpii, gpii_chan->chid,
2332			  "sending UART RFR NOT READY cmd\n");
2333 ret = gpi_send_cmd(gpii, gpii_chan,
2334 GPI_CH_CMD_UART_RFR_NOT_READY);
2335 break;
2336 default:
2337 GPII_ERR(gpii, gpii_chan->chid,
2338 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
2339 ret = -EINVAL;
2340 }
2341
2342 mutex_unlock(&gpii->ctrl_lock);
2343 return ret;
2344
2345error_start_chan:
2346	for (i = i - 1; i >= 0; i--) {
2347		gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2348		gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_RESET);
2349	}
2350	i = MAX_CHANNELS_PER_GPII;
2351error_alloc_chan:
2352	for (i = i - 1; i >= 0; i--)
2353		gpi_reset_chan(&gpii->gpii_chan[i], GPI_CH_CMD_DE_ALLOC);
2354error_alloc_ev_ring:
2355 gpi_disable_interrupts(gpii);
2356error_config_int:
2357 gpi_free_ring(&gpii->ev_ring, gpii);
2358exit_gpi_init:
2359 mutex_unlock(&gpii->ctrl_lock);
2360 return ret;
2361}
2362
2363/* release all channel resources */
2364static void gpi_free_chan_resources(struct dma_chan *chan)
2365{
2366 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2367 struct gpii *gpii = gpii_chan->gpii;
2368 enum gpi_pm_state cur_state;
2369 int ret, i;
2370
2371 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2372
2373 mutex_lock(&gpii->ctrl_lock);
2374
2375 cur_state = gpii_chan->pm_state;
2376
2377 /* disable ch state so no more TRE processing for this channel */
2378 write_lock_irq(&gpii->pm_lock);
2379 gpii_chan->pm_state = PREPARE_TERMINATE;
2380 write_unlock_irq(&gpii->pm_lock);
2381
2382	/* attempt to do a graceful hardware shutdown */
2383 if (cur_state == ACTIVE_STATE) {
2384 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2385 if (ret)
2386 GPII_ERR(gpii, gpii_chan->chid,
2387 "error stopping channel:%d\n", ret);
2388
2389 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2390 if (ret)
2391 GPII_ERR(gpii, gpii_chan->chid,
2392 "error resetting channel:%d\n", ret);
2393
2394 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2395 }
2396
2397 /* free all allocated memory */
2398 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2399 gpi_free_ring(&gpii_chan->sg_ring, gpii);
2400 vchan_free_chan_resources(&gpii_chan->vc);
2401
2402 write_lock_irq(&gpii->pm_lock);
2403 gpii_chan->pm_state = DISABLE_STATE;
2404 write_unlock_irq(&gpii->pm_lock);
2405
2406 /* if other rings are still active exit */
2407 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2408 if (gpii->gpii_chan[i].ch_ring.configured)
2409 goto exit_free;
2410
2411 GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
2412
2413 /* deallocate EV Ring */
2414 cur_state = gpii->pm_state;
2415 write_lock_irq(&gpii->pm_lock);
2416 gpii->pm_state = PREPARE_TERMINATE;
2417 write_unlock_irq(&gpii->pm_lock);
2418
2419	/* wait for in-flight event processing to complete */
2420 tasklet_kill(&gpii->ev_task);
2421
2422 /* send command to de allocate event ring */
2423 if (cur_state == ACTIVE_STATE)
2424 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2425
2426 gpi_free_ring(&gpii->ev_ring, gpii);
2427
2428 /* disable interrupts */
2429 if (cur_state == ACTIVE_STATE)
2430 gpi_disable_interrupts(gpii);
2431
2432 /* set final state to disable */
2433 write_lock_irq(&gpii->pm_lock);
2434 gpii->pm_state = DISABLE_STATE;
2435 write_unlock_irq(&gpii->pm_lock);
2436
2437exit_free:
2438 mutex_unlock(&gpii->ctrl_lock);
2439}
2440
2441/* allocate channel resources */
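/*
 * Per-channel rings: the transfer ring is allocated DMA-coherent because the
 * hardware reads TREs from it, while the sg ring is plain vmalloc() memory
 * used only by the driver for bookkeeping.
 */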
2442static int gpi_alloc_chan_resources(struct dma_chan *chan)
2443{
2444 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2445 struct gpii *gpii = gpii_chan->gpii;
2446 int ret;
2447
2448 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2449
2450 mutex_lock(&gpii->ctrl_lock);
2451
2452 /* allocate memory for transfer ring */
2453 ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
2454 sizeof(struct msm_gpi_tre), gpii, true);
2455 if (ret) {
2456 GPII_ERR(gpii, gpii_chan->chid,
2457 "error allocating xfer ring, ret:%d\n", ret);
2458 goto xfer_alloc_err;
2459 }
2460
2461 ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
2462 sizeof(struct sg_tre), gpii, false);
2463 if (ret) {
2464 GPII_ERR(gpii, gpii_chan->chid,
2465 "error allocating sg ring, ret:%d\n", ret);
2466 goto sg_alloc_error;
2467 }
2468 mutex_unlock(&gpii->ctrl_lock);
2469
2470 return 0;
2471
2472sg_alloc_error:
2473 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2474xfer_alloc_err:
2475 mutex_unlock(&gpii->ctrl_lock);
2476
2477 return ret;
2478}
2479
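/*
 * Map a serial engine (seid) to a gpii instance: if another channel is
 * already open for the same seid, reuse its gpii (so the TX and RX channels
 * of one serial engine share a gpii); otherwise hand out the first unused
 * gpii allowed by gpii_mask.
 */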
2480static int gpi_find_avail_gpii(struct gpi_dev *gpi_dev, u32 seid)
2481{
2482 int gpii;
2483 struct gpii_chan *tx_chan, *rx_chan;
2484
2485 /* check if same seid is already configured for another chid */
2486 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2487 if (!((1 << gpii) & gpi_dev->gpii_mask))
2488 continue;
2489
2490 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2491 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2492
2493 if (rx_chan->vc.chan.client_count && rx_chan->seid == seid)
2494 return gpii;
2495 if (tx_chan->vc.chan.client_count && tx_chan->seid == seid)
2496 return gpii;
2497 }
2498
2499 /* no channels configured with same seid, return next avail gpii */
2500 for (gpii = 0; gpii < gpi_dev->max_gpii; gpii++) {
2501 if (!((1 << gpii) & gpi_dev->gpii_mask))
2502 continue;
2503
2504 tx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_TX_CHAN];
2505 rx_chan = &gpi_dev->gpiis[gpii].gpii_chan[GPI_RX_CHAN];
2506
2507 /* check if gpii is configured */
2508 if (tx_chan->vc.chan.client_count ||
2509 rx_chan->vc.chan.client_count)
2510 continue;
2511
2512 /* found a free gpii */
2513 return gpii;
2514 }
2515
2516 /* no gpii instance available to use */
2517 return -EIO;
2518}
2519
2520/* gpi_of_dma_xlate: open client requested channel */
2521static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2522 struct of_dma *of_dma)
2523{
2524 struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
2525	u32 seid, chid;
2526 int gpii;
2527	struct gpii_chan *gpii_chan;
2528
2529 if (args->args_count < REQ_OF_DMA_ARGS) {
2530 GPI_ERR(gpi_dev,
2531			"gpii requires a minimum of 6 args, client passed:%d args\n",
2532 args->args_count);
2533 return NULL;
2534 }
2535
2536	chid = args->args[0];
2537	if (chid >= MAX_CHANNELS_PER_GPII) {
2538 GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
2539 return NULL;
2540 }
2541
2542	seid = args->args[1];
2543
2544 /* find next available gpii to use */
2545 gpii = gpi_find_avail_gpii(gpi_dev, seid);
2546 if (gpii < 0) {
2547 GPI_ERR(gpi_dev, "no available gpii instances\n");
2548 return NULL;
2549 }
2550
2551	gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
2552	if (gpii_chan->vc.chan.client_count) {
2553 GPI_ERR(gpi_dev, "gpii:%d chid:%d seid:%d already configured\n",
2554 gpii, chid, gpii_chan->seid);
2555 return NULL;
2556 }
2557
2558 /* get ring size, protocol, se_id, and priority */
2559 gpii_chan->seid = seid;
2560 gpii_chan->protocol = args->args[2];
2561 gpii_chan->req_tres = args->args[3];
2562 gpii_chan->priority = args->args[4];
2563
2564 GPI_LOG(gpi_dev,
2565 "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
2566 gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
2567 gpii_chan->protocol);
2568
2569 return dma_get_slave_channel(&gpii_chan->vc.chan);
2570}
2571
2572/* gpi_setup_debug - setup debug capabilities */
2573static void gpi_setup_debug(struct gpi_dev *gpi_dev)
2574{
2575 char node_name[GPI_LABEL_SIZE];
2576 const umode_t mode = 0600;
2577 int i;
2578
2579 snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
2580 (u64)gpi_dev->res->start);
2581
2582 gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2583 node_name, 0);
2584 gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2585 if (!IS_ERR_OR_NULL(pdentry)) {
2586 snprintf(node_name, sizeof(node_name), "%llx",
2587 (u64)gpi_dev->res->start);
2588 gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
2589 if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
2590 debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
2591 &gpi_dev->ipc_log_lvl);
2592 debugfs_create_u32("klog_lvl", mode,
2593 gpi_dev->dentry, &gpi_dev->klog_lvl);
2594 }
2595 }
2596
2597 for (i = 0; i < gpi_dev->max_gpii; i++) {
2598 struct gpii *gpii;
2599
2600 if (!((1 << i) & gpi_dev->gpii_mask))
2601 continue;
2602
2603 gpii = &gpi_dev->gpiis[i];
2604 snprintf(gpii->label, sizeof(gpii->label),
2605 "%s%llx_gpii%d",
2606 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
2607 gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2608 gpii->label, 0);
2609 gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2610 gpii->klog_lvl = DEFAULT_KLOG_LVL;
2611
2612 if (IS_ERR_OR_NULL(gpi_dev->dentry))
2613 continue;
2614
2615 snprintf(node_name, sizeof(node_name), "gpii%d", i);
2616 gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
2617 if (IS_ERR_OR_NULL(gpii->dentry))
2618 continue;
2619
2620 debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
2621 &gpii->ipc_log_lvl);
2622 debugfs_create_u32("klog_lvl", mode, gpii->dentry,
2623 &gpii->klog_lvl);
2624 }
2625}
2626
2627static struct dma_iommu_mapping *gpi_create_mapping(struct gpi_dev *gpi_dev)
2628{
2629 dma_addr_t base;
2630 size_t size;
2631
2632 /*
2633	 * If S1_BYPASS is enabled the iommu address space is not used; however,
2634	 * the framework still requires clients to create a mapping before
2635	 * attaching, so use the smallest size the iommu framework accepts.
2636 */
2637 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2638 base = 0;
2639 size = PAGE_SIZE;
2640 } else {
2641 base = gpi_dev->iova_base;
2642 size = gpi_dev->iova_size;
2643 }
2644
2645 GPI_LOG(gpi_dev, "Creating iommu mapping of base:0x%llx size:%lu\n",
2646 base, size);
2647
2648 return arm_iommu_create_mapping(&platform_bus_type, base, size);
2649}
2650
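/*
 * Pick a DMA mask: with SMMU translation active the mask only needs to cover
 * the top of the configured iova window, otherwise fall back to a full
 * 64-bit mask.
 */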
2651static int gpi_dma_mask(struct gpi_dev *gpi_dev)
2652{
2653 int mask = 64;
2654
2655 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2656 unsigned long addr;
2657
2658 addr = gpi_dev->iova_base + gpi_dev->iova_size + 1;
2659 mask = find_last_bit(&addr, 64);
2660 }
2661
2662 GPI_LOG(gpi_dev, "Setting dma mask to %d\n", mask);
2663
2664 return dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(mask));
2665}
2666
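/*
 * smmu_cfg (from DT) selects how the device attaches to the IOMMU: an ARM
 * iommu mapping is created and, depending on the flags, the S1_BYPASS, FAST
 * and ATOMIC domain attributes are set before attaching the device.
 */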
2667static int gpi_smmu_init(struct gpi_dev *gpi_dev)
2668{
2669	struct dma_iommu_mapping *mapping = NULL;
2670 int ret;
2671
2672	if (gpi_dev->smmu_cfg) {
2673
2674 /* create mapping table */
2675 mapping = gpi_create_mapping(gpi_dev);
2676 if (IS_ERR(mapping)) {
2677 GPI_ERR(gpi_dev,
2678 "Failed to create iommu mapping, ret:%ld\n",
2679 PTR_ERR(mapping));
2680 return PTR_ERR(mapping);
2681 }
2682
2683 if (gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS) {
2684 int s1_bypass = 1;
2685
2686 ret = iommu_domain_set_attr(mapping->domain,
2687 DOMAIN_ATTR_S1_BYPASS, &s1_bypass);
2688 if (ret) {
2689 GPI_ERR(gpi_dev,
2690 "Failed to set attr S1_BYPASS, ret:%d\n",
2691 ret);
2692 goto release_mapping;
2693 }
2694 }
2695
2696 if (gpi_dev->smmu_cfg & GPI_SMMU_FAST) {
2697 int fast = 1;
2698
2699 ret = iommu_domain_set_attr(mapping->domain,
2700 DOMAIN_ATTR_FAST, &fast);
2701 if (ret) {
2702 GPI_ERR(gpi_dev,
2703 "Failed to set attr FAST, ret:%d\n",
2704 ret);
2705 goto release_mapping;
2706 }
2707 }
2708
2709 if (gpi_dev->smmu_cfg & GPI_SMMU_ATOMIC) {
2710 int atomic = 1;
2711
2712 ret = iommu_domain_set_attr(mapping->domain,
2713 DOMAIN_ATTR_ATOMIC, &atomic);
2714 if (ret) {
2715 GPI_ERR(gpi_dev,
2716 "Failed to set attr ATOMIC, ret:%d\n",
2717 ret);
2718 goto release_mapping;
2719 }
2720 }
2721
2722 ret = arm_iommu_attach_device(gpi_dev->dev, mapping);
2723 if (ret) {
2724 GPI_ERR(gpi_dev,
2725 "Failed with iommu_attach, ret:%d\n", ret);
2726 goto release_mapping;
2727 }
2728	}
2729
2730	ret = gpi_dma_mask(gpi_dev);
2731	if (ret) {
2732		GPI_ERR(gpi_dev, "Error setting dma_mask, ret:%d\n", ret);
2733		goto error_set_mask;
2734 }
2735
2736 return ret;
2737
2738error_set_mask:
2739	if (gpi_dev->smmu_cfg)
2740 arm_iommu_detach_device(gpi_dev->dev);
2741release_mapping:
2742 if (mapping)
2743 arm_iommu_release_mapping(mapping);
2744	return ret;
2745}
2746
2747static int gpi_probe(struct platform_device *pdev)
2748{
2749 struct gpi_dev *gpi_dev;
2750 int ret, i;
2751
2752 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2753 if (!gpi_dev)
2754 return -ENOMEM;
2755
2756 gpi_dev->dev = &pdev->dev;
2757 gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
2758 gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2759 "gpi-top");
2760 if (!gpi_dev->res) {
2761 GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
2762 return -EINVAL;
2763 }
2764 gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
2765 resource_size(gpi_dev->res));
2766 if (!gpi_dev->regs) {
2767 GPI_ERR(gpi_dev, "IO remap failed\n");
2768 return -EFAULT;
2769 }
2770
2771 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
2772 &gpi_dev->max_gpii);
2773 if (ret) {
2774		GPI_ERR(gpi_dev, "missing 'qcom,max-num-gpii' DT node\n");
2775 return ret;
2776 }
2777
2778 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
2779 &gpi_dev->gpii_mask);
2780 if (ret) {
2781 GPI_ERR(gpi_dev, "missing 'gpii-mask' DT node\n");
2782 return ret;
2783 }
2784
2785 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
2786 &gpi_dev->ev_factor);
2787 if (ret) {
2788 GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT node\n");
2789 return ret;
2790 }
2791
2792	ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,smmu-cfg",
2793 &gpi_dev->smmu_cfg);
2794 if (ret) {
2795 GPI_ERR(gpi_dev, "missing 'qcom,smmu-cfg' DT node\n");
2796 return ret;
2797 }
2798 if (gpi_dev->smmu_cfg && !(gpi_dev->smmu_cfg & GPI_SMMU_S1_BYPASS)) {
2799 u64 iova_range[2];
2800
2801 ret = of_property_count_elems_of_size(gpi_dev->dev->of_node,
2802 "qcom,iova-range",
2803 sizeof(iova_range));
2804 if (ret != 1) {
2805 GPI_ERR(gpi_dev,
2806 "missing or incorrect 'qcom,iova-range' DT node ret:%d\n",
2807 ret);
2808 }
2809
2810 ret = of_property_read_u64_array(gpi_dev->dev->of_node,
2811 "qcom,iova-range", iova_range,
2812 sizeof(iova_range) / sizeof(u64));
2813 if (ret) {
2814 GPI_ERR(gpi_dev,
2815				"could not read DT prop 'qcom,iova-range'\n");
2816 return ret;
2817 }
2818 gpi_dev->iova_base = iova_range[0];
2819 gpi_dev->iova_size = iova_range[1];
2820 }
2821
2822	ret = gpi_smmu_init(gpi_dev);
2823 if (ret) {
2824 GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
2825 return ret;
2826 }
2827
2828 gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
2829 sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
2830 GFP_KERNEL);
2831 if (!gpi_dev->gpiis)
2832 return -ENOMEM;
2833
2834
2835 /* setup all the supported gpii */
2836 INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2837 for (i = 0; i < gpi_dev->max_gpii; i++) {
2838 struct gpii *gpii = &gpi_dev->gpiis[i];
2839 int chan;
2840
2841 if (!((1 << i) & gpi_dev->gpii_mask))
2842 continue;
2843
2844 /* set up ev cntxt register map */
2845 gpii->ev_cntxt_base_reg = gpi_dev->regs +
2846 GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2847 gpii->ev_cntxt_db_reg = gpi_dev->regs +
2848 GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2849 gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
2850 CNTXT_2_RING_BASE_LSB;
2851 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
2852 CNTXT_4_RING_RP_LSB;
2853 gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
2854 CNTXT_6_RING_WP_LSB;
2855 gpii->ev_cmd_reg = gpi_dev->regs +
2856 GPI_GPII_n_EV_CH_CMD_OFFS(i);
2857 gpii->ieob_src_reg = gpi_dev->regs +
2858 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
2859 gpii->ieob_clr_reg = gpi_dev->regs +
2860 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2861
2862 /* set up irq */
2863 ret = platform_get_irq(pdev, i);
2864 if (ret < 0) {
2865			GPI_ERR(gpi_dev, "could not req. irq for gpii%d ret:%d\n",
2866 i, ret);
2867 return ret;
2868 }
2869 gpii->irq = ret;
2870
2871 /* set up channel specific register info */
2872 for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2873 struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
2874
2875 /* set up ch cntxt register map */
2876 gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
2877 GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2878 gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
2879 GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2880 gpii_chan->ch_ring_base_lsb_reg =
2881 gpii_chan->ch_cntxt_base_reg +
2882 CNTXT_2_RING_BASE_LSB;
2883 gpii_chan->ch_ring_rp_lsb_reg =
2884 gpii_chan->ch_cntxt_base_reg +
2885 CNTXT_4_RING_RP_LSB;
2886 gpii_chan->ch_ring_wp_lsb_reg =
2887 gpii_chan->ch_cntxt_base_reg +
2888 CNTXT_6_RING_WP_LSB;
2889 gpii_chan->ch_cmd_reg = gpi_dev->regs +
2890 GPI_GPII_n_CH_CMD_OFFS(i);
2891
2892 /* vchan setup */
2893 vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
2894 gpii_chan->vc.desc_free = gpi_desc_free;
2895 gpii_chan->chid = chan;
2896 gpii_chan->gpii = gpii;
2897 gpii_chan->dir = GPII_CHAN_DIR[chan];
2898 }
2899 mutex_init(&gpii->ctrl_lock);
2900 rwlock_init(&gpii->pm_lock);
2901 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2902 (unsigned long)gpii);
2903 init_completion(&gpii->cmd_completion);
2904 gpii->gpii_id = i;
2905 gpii->regs = gpi_dev->regs;
2906 gpii->gpi_dev = gpi_dev;
2907 atomic_set(&gpii->dbg_index, 0);
2908 }
2909
2910 platform_set_drvdata(pdev, gpi_dev);
2911
2912	/* clear and set capabilities */
2913 dma_cap_zero(gpi_dev->dma_device.cap_mask);
2914 dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2915
2916 /* configure dmaengine apis */
2917 gpi_dev->dma_device.directions =
2918 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2919 gpi_dev->dma_device.residue_granularity =
2920 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2921 gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2922 gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2923 gpi_dev->dma_device.device_alloc_chan_resources =
2924 gpi_alloc_chan_resources;
2925 gpi_dev->dma_device.device_free_chan_resources =
2926 gpi_free_chan_resources;
2927 gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2928 gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2929 gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2930 gpi_dev->dma_device.device_config = gpi_config;
2931 gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2932 gpi_dev->dma_device.dev = gpi_dev->dev;
2933 gpi_dev->dma_device.device_pause = gpi_pause;
2934 gpi_dev->dma_device.device_resume = gpi_resume;
2935
2936 /* register with dmaengine framework */
2937 ret = dma_async_device_register(&gpi_dev->dma_device);
2938 if (ret) {
2939 GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
2940 return ret;
2941 }
2942
2943 ret = of_dma_controller_register(gpi_dev->dev->of_node,
2944 gpi_of_dma_xlate, gpi_dev);
2945 if (ret) {
2946 GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
2947 return ret;
2948 }
2949
2950 /* setup debug capabilities */
2951 gpi_setup_debug(gpi_dev);
2952 GPI_LOG(gpi_dev, "probe success\n");
2953
2954 return ret;
2955}
2956
2957static const struct of_device_id gpi_of_match[] = {
2958 { .compatible = "qcom,gpi-dma" },
2959 {}
2960};
2961MODULE_DEVICE_TABLE(of, gpi_of_match);
2962
2963static struct platform_driver gpi_driver = {
2964 .probe = gpi_probe,
2965 .driver = {
2966 .name = GPI_DMA_DRV_NAME,
2967 .of_match_table = gpi_of_match,
2968 },
2969};
2970
2971static int __init gpi_init(void)
2972{
2973 pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
2974 return platform_driver_register(&gpi_driver);
2975}
2976module_init(gpi_init)
2977
2978MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2979MODULE_LICENSE("GPL v2");