/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <asm/dma-iommu.h>
14#include <linux/atomic.h>
15#include <linux/completion.h>
16#include <linux/debugfs.h>
17#include <linux/device.h>
18#include <linux/dma-mapping.h>
19#include <linux/dmaengine.h>
20#include <linux/io.h>
21#include <linux/iommu.h>
22#include <linux/init.h>
23#include <linux/interrupt.h>
24#include <linux/ipc_logging.h>
25#include <linux/kernel.h>
26#include <linux/module.h>
27#include <linux/of.h>
28#include <linux/of_address.h>
29#include <linux/of_dma.h>
30#include <linux/of_irq.h>
31#include <linux/platform_device.h>
32#include <linux/scatterlist.h>
33#include <linux/sched_clock.h>
34#include <linux/slab.h>
35#include <linux/vmalloc.h>
36#include <asm/cacheflush.h>
37#include <linux/msm_gpi.h>
38#include "../dmaengine.h"
39#include "../virt-dma.h"
40#include "msm_gpi_mmio.h"
41
42/* global logging macros */
43#define GPI_LOG(gpi_dev, fmt, ...) do { \
44 if (gpi_dev->klog_lvl != LOG_LVL_MASK_ALL) \
45 dev_dbg(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
46 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl != LOG_LVL_MASK_ALL) \
47 ipc_log_string(gpi_dev->ilctxt, \
48 "%s: " fmt, __func__, ##__VA_ARGS__); \
49 } while (0)
50#define GPI_ERR(gpi_dev, fmt, ...) do { \
51 if (gpi_dev->klog_lvl >= LOG_LVL_ERROR) \
52 dev_err(gpi_dev->dev, "%s: " fmt, __func__, ##__VA_ARGS__); \
53 if (gpi_dev->ilctxt && gpi_dev->ipc_log_lvl >= LOG_LVL_ERROR) \
54 ipc_log_string(gpi_dev->ilctxt, \
55 "%s: " fmt, __func__, ##__VA_ARGS__); \
56 } while (0)
57
58/* gpii specific logging macros */
59#define GPII_REG(gpii, ch, fmt, ...) do { \
60 if (gpii->klog_lvl >= LOG_LVL_REG_ACCESS) \
61 pr_info("%s:%u:%s: " fmt, gpii->label, \
62 ch, __func__, ##__VA_ARGS__); \
63 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_REG_ACCESS) \
64 ipc_log_string(gpii->ilctxt, \
65 "ch:%u %s: " fmt, ch, \
66 __func__, ##__VA_ARGS__); \
67 } while (0)
68#define GPII_VERB(gpii, ch, fmt, ...) do { \
69 if (gpii->klog_lvl >= LOG_LVL_VERBOSE) \
70 pr_info("%s:%u:%s: " fmt, gpii->label, \
71 ch, __func__, ##__VA_ARGS__); \
72 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_VERBOSE) \
73 ipc_log_string(gpii->ilctxt, \
74 "ch:%u %s: " fmt, ch, \
75 __func__, ##__VA_ARGS__); \
76 } while (0)
77#define GPII_INFO(gpii, ch, fmt, ...) do { \
78 if (gpii->klog_lvl >= LOG_LVL_INFO) \
79 pr_info("%s:%u:%s: " fmt, gpii->label, ch, \
80 __func__, ##__VA_ARGS__); \
81 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_INFO) \
82 ipc_log_string(gpii->ilctxt, \
83 "ch:%u %s: " fmt, ch, \
84 __func__, ##__VA_ARGS__); \
85 } while (0)
86#define GPII_ERR(gpii, ch, fmt, ...) do { \
87 if (gpii->klog_lvl >= LOG_LVL_ERROR) \
88 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
89 __func__, ##__VA_ARGS__); \
90 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_ERROR) \
91 ipc_log_string(gpii->ilctxt, \
92 "ch:%u %s: " fmt, ch, \
93 __func__, ##__VA_ARGS__); \
94 } while (0)
95#define GPII_CRITIC(gpii, ch, fmt, ...) do { \
96 if (gpii->klog_lvl >= LOG_LVL_CRITICAL) \
97 pr_err("%s:%u:%s: " fmt, gpii->label, ch, \
98 __func__, ##__VA_ARGS__); \
99 if (gpii->ilctxt && gpii->ipc_log_lvl >= LOG_LVL_CRITICAL) \
100 ipc_log_string(gpii->ilctxt, \
101 "ch:%u %s: " fmt, ch, \
102 __func__, ##__VA_ARGS__); \
103 } while (0)
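
/*
 * Illustrative usage of the gpii logging macros (mirrors calls made later
 * in this file):
 *	GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
 *	GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
 *		 TO_GPI_CMD_STR(gpi_cmd));
 */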
104
105enum DEBUG_LOG_LVL {
106 LOG_LVL_MASK_ALL,
107 LOG_LVL_CRITICAL,
108 LOG_LVL_ERROR,
109 LOG_LVL_INFO,
110 LOG_LVL_VERBOSE,
111 LOG_LVL_REG_ACCESS,
112};
113
114enum EV_PRIORITY {
115 EV_PRIORITY_ISR,
116 EV_PRIORITY_TASKLET,
117};
118
119#define GPI_DMA_DRV_NAME "gpi_dma"
120#define DEFAULT_KLOG_LVL (LOG_LVL_CRITICAL)
121#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
122#define DEFAULT_IPC_LOG_LVL (LOG_LVL_VERBOSE)
123#define IPC_LOG_PAGES (40)
124#define GPI_DBG_LOG_SIZE (SZ_1K) /* size must be power of 2 */
125#else
126#define IPC_LOG_PAGES (2)
127#define GPI_DBG_LOG_SIZE (0) /* size must be power of 2 */
128#define DEFAULT_IPC_LOG_LVL (LOG_LVL_ERROR)
129#endif
130
131#define GPI_LABEL_SIZE (256)
132#define GPI_DBG_COMMON (99)
133#define MAX_CHANNELS_PER_GPII (2)
134#define CMD_TIMEOUT_MS (50)
135#define STATE_IGNORE (U32_MAX)
136#define REQ_OF_DMA_ARGS (6) /* # of arguments required from client */
137
138struct __packed gpi_error_log_entry {
139 u32 routine : 4;
140 u32 type : 4;
141 u32 reserved0 : 4;
142 u32 code : 4;
143 u32 reserved1 : 3;
144 u32 chid : 5;
145 u32 reserved2 : 1;
146 u32 chtype : 1;
147 u32 ee : 1;
148};
149
150struct __packed xfer_compl_event {
151 u64 ptr;
152 u32 length : 24;
153 u8 code;
154 u16 status;
155 u8 type;
156 u8 chid;
157};
158
159struct __packed immediate_data_event {
160 u8 data_bytes[8];
161 u8 length : 4;
162 u8 resvd : 4;
163 u16 tre_index;
164 u8 code;
165 u16 status;
166 u8 type;
167 u8 chid;
168};
169
170struct __packed qup_notif_event {
171 u32 status;
172 u32 time;
173 u32 count :24;
174 u8 resvd;
175 u16 resvd1;
176 u8 type;
177 u8 chid;
178};
179
180struct __packed gpi_ere {
181 u32 dword[4];
182};
183
184enum GPI_EV_TYPE {
185 XFER_COMPLETE_EV_TYPE = 0x22,
186 IMMEDIATE_DATA_EV_TYPE = 0x30,
187 QUP_NOTIF_EV_TYPE = 0x31,
188 STALE_EV_TYPE = 0xFF,
189};
190
191union __packed gpi_event {
192 struct __packed xfer_compl_event xfer_compl_event;
193 struct __packed immediate_data_event immediate_data_event;
194 struct __packed qup_notif_event qup_notif_event;
195 struct __packed gpi_ere gpi_ere;
196};
197
198enum gpii_irq_settings {
199 DEFAULT_IRQ_SETTINGS,
200 MASK_IEOB_SETTINGS,
201};
202
203enum gpi_ev_state {
204 DEFAULT_EV_CH_STATE = 0,
205 EV_STATE_NOT_ALLOCATED = DEFAULT_EV_CH_STATE,
206 EV_STATE_ALLOCATED,
207 MAX_EV_STATES
208};
209
210static const char *const gpi_ev_state_str[MAX_EV_STATES] = {
211 [EV_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
212 [EV_STATE_ALLOCATED] = "ALLOCATED",
213};
214
215#define TO_GPI_EV_STATE_STR(state) ((state >= MAX_EV_STATES) ? \
216 "INVALID" : gpi_ev_state_str[state])
217
218enum gpi_ch_state {
219 DEFAULT_CH_STATE = 0x0,
220 CH_STATE_NOT_ALLOCATED = DEFAULT_CH_STATE,
221 CH_STATE_ALLOCATED = 0x1,
222 CH_STATE_STARTED = 0x2,
223 CH_STATE_STOPPED = 0x3,
224 CH_STATE_STOP_IN_PROC = 0x4,
225 CH_STATE_ERROR = 0xf,
226 MAX_CH_STATES
227};
228
229static const char *const gpi_ch_state_str[MAX_CH_STATES] = {
230 [CH_STATE_NOT_ALLOCATED] = "NOT ALLOCATED",
231 [CH_STATE_ALLOCATED] = "ALLOCATED",
232 [CH_STATE_STARTED] = "STARTED",
233 [CH_STATE_STOPPED] = "STOPPED",
234 [CH_STATE_STOP_IN_PROC] = "STOP IN PROCESS",
235 [CH_STATE_ERROR] = "ERROR",
236};
237
238#define TO_GPI_CH_STATE_STR(state) ((state >= MAX_CH_STATES) ? \
239 "INVALID" : gpi_ch_state_str[state])
240
241enum gpi_cmd {
242 GPI_CH_CMD_BEGIN,
243 GPI_CH_CMD_ALLOCATE = GPI_CH_CMD_BEGIN,
244 GPI_CH_CMD_START,
245 GPI_CH_CMD_STOP,
246 GPI_CH_CMD_RESET,
247 GPI_CH_CMD_DE_ALLOC,
248 GPI_CH_CMD_UART_SW_STALE,
249 GPI_CH_CMD_UART_RFR_READY,
250 GPI_CH_CMD_UART_RFR_NOT_READY,
251 GPI_CH_CMD_END = GPI_CH_CMD_UART_RFR_NOT_READY,
252 GPI_EV_CMD_BEGIN,
253 GPI_EV_CMD_ALLOCATE = GPI_EV_CMD_BEGIN,
254 GPI_EV_CMD_RESET,
255 GPI_EV_CMD_DEALLOC,
256 GPI_EV_CMD_END = GPI_EV_CMD_DEALLOC,
257 GPI_MAX_CMD,
258};
259
260#define IS_CHAN_CMD(cmd) (cmd <= GPI_CH_CMD_END)
261
262static const char *const gpi_cmd_str[GPI_MAX_CMD] = {
263 [GPI_CH_CMD_ALLOCATE] = "CH ALLOCATE",
264 [GPI_CH_CMD_START] = "CH START",
265 [GPI_CH_CMD_STOP] = "CH STOP",
266 [GPI_CH_CMD_RESET] = "CH_RESET",
267 [GPI_CH_CMD_DE_ALLOC] = "DE ALLOC",
268 [GPI_CH_CMD_UART_SW_STALE] = "UART SW STALE",
269 [GPI_CH_CMD_UART_RFR_READY] = "UART RFR READY",
270 [GPI_CH_CMD_UART_RFR_NOT_READY] = "UART RFR NOT READY",
271 [GPI_EV_CMD_ALLOCATE] = "EV ALLOCATE",
272 [GPI_EV_CMD_RESET] = "EV RESET",
273 [GPI_EV_CMD_DEALLOC] = "EV DEALLOC",
274};
275
276#define TO_GPI_CMD_STR(cmd) ((cmd >= GPI_MAX_CMD) ? "INVALID" : \
277 gpi_cmd_str[cmd])
278
279static const char *const gpi_cb_event_str[MSM_GPI_QUP_MAX_EVENT] = {
280 [MSM_GPI_QUP_NOTIFY] = "NOTIFY",
281 [MSM_GPI_QUP_ERROR] = "GLOBAL ERROR",
282 [MSM_GPI_QUP_CH_ERROR] = "CHAN ERROR",
283 [MSM_GPI_QUP_PENDING_EVENT] = "PENDING EVENT",
284 [MSM_GPI_QUP_EOT_DESC_MISMATCH] = "EOT/DESC MISMATCH",
285 [MSM_GPI_QUP_SW_ERROR] = "SW ERROR",
286};
287
288#define TO_GPI_CB_EVENT_STR(event) ((event >= MSM_GPI_QUP_MAX_EVENT) ? \
289 "INVALID" : gpi_cb_event_str[event])
290
291enum se_protocol {
292 SE_PROTOCOL_SPI = 1,
293 SE_PROTOCOL_UART = 2,
294 SE_PROTOCOL_I2C = 3,
295 SE_MAX_PROTOCOL
296};
297
/*
 * @DISABLE_STATE: no register access allowed
 * @CONFIG_STATE: client has configured the channel
 * @PREPARE_HARDWARE: register access is allowed,
 *		      however events are not processed
 * @ACTIVE_STATE: channels are fully operational
 * @PREPARE_TERMINATE: graceful termination of channels,
 *		       register access is allowed
 * @PAUSE_STATE: channels are active, but not processing any events
 */
308enum gpi_pm_state {
309 DISABLE_STATE,
310 CONFIG_STATE,
311 PREPARE_HARDWARE,
312 ACTIVE_STATE,
313 PREPARE_TERMINATE,
314 PAUSE_STATE,
315 MAX_PM_STATE
316};
317
318#define REG_ACCESS_VALID(pm_state) (pm_state >= PREPARE_HARDWARE)
319
320static const char *const gpi_pm_state_str[MAX_PM_STATE] = {
321 [DISABLE_STATE] = "DISABLE",
322 [CONFIG_STATE] = "CONFIG",
323 [PREPARE_HARDWARE] = "PREPARE HARDWARE",
324 [ACTIVE_STATE] = "ACTIVE",
325 [PREPARE_TERMINATE] = "PREPARE TERMINATE",
326 [PAUSE_STATE] = "PAUSE",
327};
328
329#define TO_GPI_PM_STR(state) ((state >= MAX_PM_STATE) ? \
330 "INVALID" : gpi_pm_state_str[state])
331
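/*
 * Command descriptor table: the hardware opcode for each gpi_cmd, the
 * channel/event ring state expected once the command completes (or
 * STATE_IGNORE), and the command timeout.
 */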
332static const struct {
333 enum gpi_cmd gpi_cmd;
334 u32 opcode;
335 u32 state;
336 u32 timeout_ms;
337} gpi_cmd_info[GPI_MAX_CMD] = {
338 {
339 GPI_CH_CMD_ALLOCATE,
340 GPI_GPII_n_CH_CMD_ALLOCATE,
341 CH_STATE_ALLOCATED,
342 CMD_TIMEOUT_MS,
343 },
344 {
345 GPI_CH_CMD_START,
346 GPI_GPII_n_CH_CMD_START,
347 CH_STATE_STARTED,
348 CMD_TIMEOUT_MS,
349 },
350 {
351 GPI_CH_CMD_STOP,
352 GPI_GPII_n_CH_CMD_STOP,
353 CH_STATE_STOPPED,
354 CMD_TIMEOUT_MS,
355 },
356 {
357 GPI_CH_CMD_RESET,
358 GPI_GPII_n_CH_CMD_RESET,
359 CH_STATE_ALLOCATED,
360 CMD_TIMEOUT_MS,
361 },
362 {
363 GPI_CH_CMD_DE_ALLOC,
364 GPI_GPII_n_CH_CMD_DE_ALLOC,
365 CH_STATE_NOT_ALLOCATED,
366 CMD_TIMEOUT_MS,
367 },
368 {
369 GPI_CH_CMD_UART_SW_STALE,
370 GPI_GPII_n_CH_CMD_UART_SW_STALE,
371 STATE_IGNORE,
372 CMD_TIMEOUT_MS,
373 },
374 {
375 GPI_CH_CMD_UART_RFR_READY,
376 GPI_GPII_n_CH_CMD_UART_RFR_READY,
377 STATE_IGNORE,
378 CMD_TIMEOUT_MS,
379 },
380 {
381 GPI_CH_CMD_UART_RFR_NOT_READY,
382 GPI_GPII_n_CH_CMD_UART_RFR_NOT_READY,
383 STATE_IGNORE,
384 CMD_TIMEOUT_MS,
385 },
386 {
387 GPI_EV_CMD_ALLOCATE,
388 GPI_GPII_n_EV_CH_CMD_ALLOCATE,
389 EV_STATE_ALLOCATED,
390 CMD_TIMEOUT_MS,
391 },
392 {
393 GPI_EV_CMD_RESET,
394 GPI_GPII_n_EV_CH_CMD_RESET,
395 EV_STATE_ALLOCATED,
396 CMD_TIMEOUT_MS,
397 },
398 {
399 GPI_EV_CMD_DEALLOC,
400 GPI_GPII_n_EV_CH_CMD_DE_ALLOC,
401 EV_STATE_NOT_ALLOCATED,
402 CMD_TIMEOUT_MS,
403 },
404};
405
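/*
 * struct gpi_ring - circular ring used for transfer, scatterlist and event
 * elements
 * @pre_aligned: start of the unaligned allocation
 * @alloc_size: size of the unaligned allocation
 * @phys_addr: aligned physical base address programmed into hardware
 * @dma_handle: handle from dma_alloc_coherent (0 when vmalloc backed)
 * @base: aligned virtual address of the first element
 * @wp: local write pointer
 * @rp: local read pointer
 * @len: ring length in bytes
 * @el_size: size of a single element
 * @elements: number of elements in the ring
 * @configured: ring memory has been allocated and initialized
 */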
406struct gpi_ring {
407 void *pre_aligned;
408 size_t alloc_size;
409 phys_addr_t phys_addr;
410 dma_addr_t dma_handle;
411 void *base;
412 void *wp;
413 void *rp;
414 u32 len;
415 u32 el_size;
416 u32 elements;
417 bool configured;
418};
419
420struct sg_tre {
421 void *ptr;
422 void *wp; /* store chan wp for debugging */
423};
424
425struct gpi_dbg_log {
426 void *addr;
427 u64 time;
428 u32 val;
429 bool read;
430};
431
432struct gpi_dev {
433 struct dma_device dma_device;
434 struct device *dev;
435 struct resource *res;
436 void __iomem *regs;
437 u32 max_gpii; /* maximum # of gpii instances available per gpi block */
438 u32 gpii_mask; /* gpii instances available for apps */
439 u32 ev_factor; /* ev ring length factor */
440 struct gpii *gpiis;
441 void *ilctxt;
442 u32 ipc_log_lvl;
443 u32 klog_lvl;
444 struct dentry *dentry;
445};
446
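/* struct gpii_chan - per-channel (TX or RX) state within a gpii instance */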
447struct gpii_chan {
448 struct virt_dma_chan vc;
449 u32 chid;
450 u32 seid;
451 enum se_protocol protocol;
452 enum EV_PRIORITY priority; /* comes from clients DT node */
453 struct gpii *gpii;
454 enum gpi_ch_state ch_state;
455 enum gpi_pm_state pm_state;
456 void __iomem *ch_cntxt_base_reg;
457 void __iomem *ch_cntxt_db_reg;
458 void __iomem *ch_ring_base_lsb_reg,
459 *ch_ring_rp_lsb_reg,
460 *ch_ring_wp_lsb_reg;
461 void __iomem *ch_cmd_reg;
462 u32 req_tres; /* # of tre's client requested */
463 u32 dir;
464 struct gpi_ring ch_ring;
465 struct gpi_ring sg_ring; /* points to client scatterlist */
466 struct gpi_client_info client_info;
467};
468
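/*
 * struct gpii - one GPI interface instance: a pair of transfer channels
 * sharing a single event ring, command completion, and irq line
 */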
469struct gpii {
470 u32 gpii_id;
471 struct gpii_chan gpii_chan[MAX_CHANNELS_PER_GPII];
472 struct gpi_dev *gpi_dev;
473 enum EV_PRIORITY ev_priority;
474 enum se_protocol protocol;
475 int irq;
476 void __iomem *regs; /* points to gpi top */
477 void __iomem *ev_cntxt_base_reg;
478 void __iomem *ev_cntxt_db_reg;
479 void __iomem *ev_ring_base_lsb_reg,
480 *ev_ring_rp_lsb_reg,
481 *ev_ring_wp_lsb_reg;
482 void __iomem *ev_cmd_reg;
483 void __iomem *ieob_src_reg;
484 void __iomem *ieob_clr_reg;
485 struct mutex ctrl_lock;
486 enum gpi_ev_state ev_state;
487 bool configured_irq;
488 enum gpi_pm_state pm_state;
489 rwlock_t pm_lock;
490 struct gpi_ring ev_ring;
491 struct tasklet_struct ev_task; /* event processing tasklet */
492 struct completion cmd_completion;
493 enum gpi_cmd gpi_cmd;
494 u32 cntxt_type_irq_msk;
495 void *ilctxt;
496 u32 ipc_log_lvl;
497 u32 klog_lvl;
498 struct gpi_dbg_log dbg_log[GPI_DBG_LOG_SIZE];
499 atomic_t dbg_index;
500 char label[GPI_LABEL_SIZE];
501 struct dentry *dentry;
502};
503
504struct gpi_desc {
505 struct virt_dma_desc vd;
506 void *wp; /* points to TRE last queued during issue_pending */
507 struct sg_tre *sg_tre; /* points to last scatterlist */
508 void *db; /* DB register to program */
509 struct gpii_chan *gpii_chan;
510};
511
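/* chid 0 is the outbound (TX) channel, chid 1 is the inbound (RX) channel */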
512const u32 GPII_CHAN_DIR[MAX_CHANNELS_PER_GPII] = {
513 GPI_CHTYPE_DIR_OUT, GPI_CHTYPE_DIR_IN
514};
515
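/* debugfs parent directory for the driver */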
516struct dentry *pdentry;
517static irqreturn_t gpi_handle_irq(int irq, void *data);
518static void gpi_ring_recycle_ev_element(struct gpi_ring *ring);
519static int gpi_ring_add_element(struct gpi_ring *ring, void **wp);
520static void gpi_process_events(struct gpii *gpii);
521
522static inline struct gpii_chan *to_gpii_chan(struct dma_chan *dma_chan)
523{
524 return container_of(dma_chan, struct gpii_chan, vc.chan);
525}
526
527static inline struct gpi_desc *to_gpi_desc(struct virt_dma_desc *vd)
528{
529 return container_of(vd, struct gpi_desc, vd);
530}
531
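/* translate between ring virtual addresses and their physical counterparts */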
532static inline phys_addr_t to_physical(const struct gpi_ring *const ring,
533 void *addr)
534{
535 return ring->phys_addr + (addr - ring->base);
536}
537
538static inline void *to_virtual(const struct gpi_ring *const ring,
539 phys_addr_t addr)
540{
541 return ring->base + (addr - ring->phys_addr);
542}
543
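/*
 * Register accessors: the CONFIG_QCOM_GPI_DMA_DEBUG variants additionally
 * record every access in the per-gpii dbg_log ring for post-mortem analysis.
 */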
544#ifdef CONFIG_QCOM_GPI_DMA_DEBUG
545static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
546{
547 u64 time = sched_clock();
548 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
549 u32 val;
550
551 val = readl_relaxed(addr);
552 index &= (GPI_DBG_LOG_SIZE - 1);
553 (gpii->dbg_log + index)->addr = addr;
554 (gpii->dbg_log + index)->time = time;
555 (gpii->dbg_log + index)->val = val;
556 (gpii->dbg_log + index)->read = true;
557 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
558 addr - gpii->regs, val);
559 return val;
560}
561static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
562{
563 u64 time = sched_clock();
564 unsigned int index = atomic_inc_return(&gpii->dbg_index) - 1;
565
566 index &= (GPI_DBG_LOG_SIZE - 1);
567 (gpii->dbg_log + index)->addr = addr;
568 (gpii->dbg_log + index)->time = time;
569 (gpii->dbg_log + index)->val = val;
570 (gpii->dbg_log + index)->read = false;
571
572 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
573 addr - gpii->regs, val);
574 writel_relaxed(val, addr);
575}
576#else
577static inline u32 gpi_read_reg(struct gpii *gpii, void __iomem *addr)
578{
579 u32 val = readl_relaxed(addr);
580
581 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
582 addr - gpii->regs, val);
583 return val;
584}
585static inline void gpi_write_reg(struct gpii *gpii, void __iomem *addr, u32 val)
586{
587 GPII_REG(gpii, GPI_DBG_COMMON, "offset:0x%lx val:0x%x\n",
588 addr - gpii->regs, val);
589 writel_relaxed(val, addr);
590}
591#endif
592
593/* gpi_write_reg_field - write to specific bit field */
594static inline void gpi_write_reg_field(struct gpii *gpii,
595 void __iomem *addr,
596 u32 mask,
597 u32 shift,
598 u32 val)
599{
600 u32 tmp = gpi_read_reg(gpii, addr);
601
602 tmp &= ~mask;
603 val = tmp | ((val << shift) & mask);
604 gpi_write_reg(gpii, addr, val);
605}
606
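/* mask all gpii interrupt sources and release the irq line */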
607static void gpi_disable_interrupts(struct gpii *gpii)
608{
609 struct {
610 u32 offset;
611 u32 mask;
612 u32 shift;
613 u32 val;
614 } default_reg[] = {
615 {
616 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
617 (gpii->gpii_id),
618 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
619 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
620 0,
621 },
622 {
623 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
624 (gpii->gpii_id),
625 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
626 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
627 0,
628 },
629 {
630 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
631 (gpii->gpii_id),
632 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
633 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
634 0,
635 },
636 {
637 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
638 (gpii->gpii_id),
639 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
640 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
641 0,
642 },
643 {
644 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
645 (gpii->gpii_id),
646 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
647 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
648 0,
649 },
650 {
651 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
652 (gpii->gpii_id),
653 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
654 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
655 0,
656 },
657 {
658 GPI_GPII_n_CNTXT_INTSET_OFFS
659 (gpii->gpii_id),
660 GPI_GPII_n_CNTXT_INTSET_BMSK,
661 GPI_GPII_n_CNTXT_INTSET_SHFT,
662 0,
663 },
664 { 0 },
665 };
666 int i;
667
668 for (i = 0; default_reg[i].offset; i++)
669 gpi_write_reg_field(gpii, gpii->regs +
670 default_reg[i].offset,
671 default_reg[i].mask,
672 default_reg[i].shift,
673 default_reg[i].val);
674 gpii->cntxt_type_irq_msk = 0;
675 devm_free_irq(gpii->gpi_dev->dev, gpii->irq, gpii);
676 gpii->configured_irq = false;
677}
678
679/* configure and enable interrupts */
680static int gpi_config_interrupts(struct gpii *gpii,
681 enum gpii_irq_settings settings,
682 bool mask)
683{
684 int ret;
685 int i;
686 const u32 def_type = (GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GENERAL |
687 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB |
688 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB |
689 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL |
690 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
691 struct {
692 u32 offset;
693 u32 mask;
694 u32 shift;
695 u32 val;
696 } default_reg[] = {
697 {
698 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS
699 (gpii->gpii_id),
700 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
701 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
702 def_type,
703 },
704 {
705 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_OFFS
706 (gpii->gpii_id),
707 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
708 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_SHFT,
709 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_MSK_BMSK,
710 },
711 {
712 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_OFFS
713 (gpii->gpii_id),
714 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
715 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_SHFT,
716 GPI_GPII_n_CNTXT_SRC_CH_IRQ_MSK_BMSK,
717 },
718 {
719 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_OFFS
720 (gpii->gpii_id),
721 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
722 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_SHFT,
723 GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_MSK_BMSK,
724 },
725 {
726 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_OFFS
727 (gpii->gpii_id),
728 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_BMSK,
729 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_SHFT,
730 GPI_GPII_n_CNTXT_GLOB_IRQ_EN_ERROR_INT,
731 },
732 {
733 GPI_GPII_n_CNTXT_GPII_IRQ_EN_OFFS
734 (gpii->gpii_id),
735 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
736 GPI_GPII_n_CNTXT_GPII_IRQ_EN_SHFT,
737 GPI_GPII_n_CNTXT_GPII_IRQ_EN_BMSK,
738 },
739 {
740 GPI_GPII_n_CNTXT_MSI_BASE_LSB_OFFS
741 (gpii->gpii_id),
742 U32_MAX,
743 0,
744 0x0,
745 },
746 {
747 GPI_GPII_n_CNTXT_MSI_BASE_MSB_OFFS
748 (gpii->gpii_id),
749 U32_MAX,
750 0,
751 0x0,
752 },
753 {
754 GPI_GPII_n_CNTXT_SCRATCH_0_OFFS
755 (gpii->gpii_id),
756 U32_MAX,
757 0,
758 0x0,
759 },
760 {
761 GPI_GPII_n_CNTXT_SCRATCH_1_OFFS
762 (gpii->gpii_id),
763 U32_MAX,
764 0,
765 0x0,
766 },
767 {
768 GPI_GPII_n_CNTXT_INTSET_OFFS
769 (gpii->gpii_id),
770 GPI_GPII_n_CNTXT_INTSET_BMSK,
771 GPI_GPII_n_CNTXT_INTSET_SHFT,
772 0x01,
773 },
774 {
775 GPI_GPII_n_ERROR_LOG_OFFS
776 (gpii->gpii_id),
777 U32_MAX,
778 0,
779 0x00,
780 },
781 { 0 },
782 };
783
784 GPII_VERB(gpii, GPI_DBG_COMMON, "configured:%c setting:%s mask:%c\n",
785 (gpii->configured_irq) ? 'F' : 'T',
786 (settings == DEFAULT_IRQ_SETTINGS) ? "default" : "user_spec",
787 (mask) ? 'T' : 'F');
788
789 if (gpii->configured_irq == false) {
790 ret = devm_request_irq(gpii->gpi_dev->dev, gpii->irq,
791 gpi_handle_irq, IRQF_TRIGGER_HIGH,
792 gpii->label, gpii);
793 if (ret < 0) {
794 GPII_CRITIC(gpii, GPI_DBG_COMMON,
795 "error request irq:%d ret:%d\n",
796 gpii->irq, ret);
797 return ret;
798 }
799 }
800
801 if (settings == MASK_IEOB_SETTINGS) {
802 /*
803 * GPII only uses one EV ring per gpii so we can globally
804 * enable/disable IEOB interrupt
805 */
806 if (mask)
807 gpii->cntxt_type_irq_msk |=
808 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
809 else
810 gpii->cntxt_type_irq_msk &=
811 ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB);
812 gpi_write_reg_field(gpii, gpii->regs +
813 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_OFFS(gpii->gpii_id),
814 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_BMSK,
815 GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_SHFT,
816 gpii->cntxt_type_irq_msk);
817 } else {
818 for (i = 0; default_reg[i].offset; i++)
819 gpi_write_reg_field(gpii, gpii->regs +
820 default_reg[i].offset,
821 default_reg[i].mask,
822 default_reg[i].shift,
823 default_reg[i].val);
824 gpii->cntxt_type_irq_msk = def_type;
	}
826
827 gpii->configured_irq = true;
828
829 return 0;
830}
831
832/* Sends gpii event or channel command */
833static int gpi_send_cmd(struct gpii *gpii,
834 struct gpii_chan *gpii_chan,
835 enum gpi_cmd gpi_cmd)
836{
837 u32 chid = MAX_CHANNELS_PER_GPII;
838 u32 cmd;
839 unsigned long timeout;
840 void __iomem *cmd_reg;
841
842 if (gpi_cmd >= GPI_MAX_CMD)
843 return -EINVAL;
844 if (IS_CHAN_CMD(gpi_cmd))
845 chid = gpii_chan->chid;
846
847 GPII_INFO(gpii, chid,
848 "sending cmd: %s\n", TO_GPI_CMD_STR(gpi_cmd));
849
850 /* send opcode and wait for completion */
851 reinit_completion(&gpii->cmd_completion);
852 gpii->gpi_cmd = gpi_cmd;
853
854 cmd_reg = IS_CHAN_CMD(gpi_cmd) ? gpii_chan->ch_cmd_reg :
855 gpii->ev_cmd_reg;
856 cmd = IS_CHAN_CMD(gpi_cmd) ?
857 GPI_GPII_n_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, chid) :
858 GPI_GPII_n_EV_CH_CMD(gpi_cmd_info[gpi_cmd].opcode, 0);
859 gpi_write_reg(gpii, cmd_reg, cmd);
860 timeout = wait_for_completion_timeout(&gpii->cmd_completion,
861 msecs_to_jiffies(gpi_cmd_info[gpi_cmd].timeout_ms));
862
863 if (!timeout) {
864 GPII_ERR(gpii, chid, "cmd: %s completion timeout\n",
865 TO_GPI_CMD_STR(gpi_cmd));
866 return -EIO;
867 }
868
	/* confirm the new state is correct if the cmd changes state */
870 if (gpi_cmd_info[gpi_cmd].state == STATE_IGNORE)
871 return 0;
872 if (IS_CHAN_CMD(gpi_cmd) &&
873 gpii_chan->ch_state == gpi_cmd_info[gpi_cmd].state)
874 return 0;
875 if (!IS_CHAN_CMD(gpi_cmd) &&
876 gpii->ev_state == gpi_cmd_info[gpi_cmd].state)
877 return 0;
878
879 return -EIO;
880}
881
882/* program transfer ring DB register */
883static inline void gpi_write_ch_db(struct gpii_chan *gpii_chan,
884 struct gpi_ring *ring,
885 void *wp)
886{
887 struct gpii *gpii = gpii_chan->gpii;
888 phys_addr_t p_wp;
889
890 p_wp = to_physical(ring, wp);
891 gpi_write_reg(gpii, gpii_chan->ch_cntxt_db_reg, (u32)p_wp);
892}
893
894/* program event ring DB register */
895static inline void gpi_write_ev_db(struct gpii *gpii,
896 struct gpi_ring *ring,
897 void *wp)
898{
899 phys_addr_t p_wp;
900
901 p_wp = ring->phys_addr + (wp - ring->base);
902 gpi_write_reg(gpii, gpii->ev_cntxt_db_reg, (u32)p_wp);
903}
904
905/* notify client with generic event */
906static void gpi_generate_cb_event(struct gpii_chan *gpii_chan,
907 enum msm_gpi_cb_event event,
908 u64 status)
909{
910 struct gpii *gpii = gpii_chan->gpii;
911 struct gpi_client_info *client_info = &gpii_chan->client_info;
912 struct msm_gpi_cb msm_gpi_cb = {0};
913
914 GPII_ERR(gpii, gpii_chan->chid,
915 "notifying event:%s with status:%llu\n",
916 TO_GPI_CB_EVENT_STR(event), status);
917
918 msm_gpi_cb.cb_event = event;
919 msm_gpi_cb.status = status;
920 msm_gpi_cb.timestamp = sched_clock();
921 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
922 client_info->cb_param);
923}
924
925/* process transfer completion interrupt */
926static void gpi_process_ieob(struct gpii *gpii)
927{
928 u32 ieob_irq;
929
930 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
931 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
932 GPII_VERB(gpii, GPI_DBG_COMMON, "IEOB_IRQ:0x%x\n", ieob_irq);
933
934 /* process events based on priority */
935 if (likely(gpii->ev_priority >= EV_PRIORITY_TASKLET)) {
936 GPII_VERB(gpii, GPI_DBG_COMMON, "scheduling tasklet\n");
937 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 0);
938 tasklet_schedule(&gpii->ev_task);
939 } else {
940 GPII_VERB(gpii, GPI_DBG_COMMON, "processing events from isr\n");
941 gpi_process_events(gpii);
942 }
943}
944
945/* process channel control interrupt */
946static void gpi_process_ch_ctrl_irq(struct gpii *gpii)
947{
948 u32 gpii_id = gpii->gpii_id;
949 u32 offset = GPI_GPII_n_CNTXT_SRC_GPII_CH_IRQ_OFFS(gpii_id);
950 u32 ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
951 u32 chid;
952 struct gpii_chan *gpii_chan;
953 u32 state;
954
955 /* clear the status */
956 offset = GPI_GPII_n_CNTXT_SRC_CH_IRQ_CLR_OFFS(gpii_id);
957 gpi_write_reg(gpii, gpii->regs + offset, (u32)ch_irq);
958
959 for (chid = 0; chid < MAX_CHANNELS_PER_GPII; chid++) {
960 if (!(BIT(chid) & ch_irq))
961 continue;
962
963 gpii_chan = &gpii->gpii_chan[chid];
964 GPII_VERB(gpii, chid, "processing channel ctrl irq\n");
965 state = gpi_read_reg(gpii, gpii_chan->ch_cntxt_base_reg +
966 CNTXT_0_CONFIG);
967 state = (state & GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_BMSK) >>
968 GPI_GPII_n_CH_k_CNTXT_0_CHSTATE_SHFT;
969
		/*
		 * CH_CMD_DEALLOC always succeeds, but it does not change
		 * the hardware channel state, so overwrite the software
		 * state with the default state.
		 */
975 if (gpii->gpi_cmd == GPI_CH_CMD_DE_ALLOC)
976 state = DEFAULT_CH_STATE;
977 gpii_chan->ch_state = state;
978 GPII_VERB(gpii, chid, "setting channel to state:%s\n",
979 TO_GPI_CH_STATE_STR(gpii_chan->ch_state));
980
		/*
		 * Trigger complete_all unless the channel is in the stop
		 * in process state; that is a transitional state and we
		 * wait for the stop interrupt before notifying.
		 */
986 if (gpii_chan->ch_state != CH_STATE_STOP_IN_PROC)
987 complete_all(&gpii->cmd_completion);
988
989 /* notifying clients if in error state */
990 if (gpii_chan->ch_state == CH_STATE_ERROR)
991 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_CH_ERROR,
992 __LINE__);
993 }
994}
995
996/* processing gpi level error interrupts */
997static void gpi_process_glob_err_irq(struct gpii *gpii)
998{
999 u32 gpii_id = gpii->gpii_id;
1000 u32 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_STTS_OFFS(gpii_id);
1001 u32 irq_stts = gpi_read_reg(gpii, gpii->regs + offset);
1002 u32 error_log;
1003 u32 chid;
1004 struct gpii_chan *gpii_chan;
1005 struct gpi_client_info *client_info;
1006 struct msm_gpi_cb msm_gpi_cb;
1007 struct gpi_error_log_entry *log_entry =
1008 (struct gpi_error_log_entry *)&error_log;
1009
1010 offset = GPI_GPII_n_CNTXT_GLOB_IRQ_CLR_OFFS(gpii_id);
1011 gpi_write_reg(gpii, gpii->regs + offset, irq_stts);
1012
1013 /* only error interrupt should be set */
1014 if (irq_stts & ~GPI_GLOB_IRQ_ERROR_INT_MSK) {
1015 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid error status:0x%x\n",
1016 irq_stts);
1017 goto error_irq;
1018 }
1019
1020 offset = GPI_GPII_n_ERROR_LOG_OFFS(gpii_id);
1021 error_log = gpi_read_reg(gpii, gpii->regs + offset);
1022 gpi_write_reg(gpii, gpii->regs + offset, 0);
1023
1024 /* get channel info */
1025 chid = ((struct gpi_error_log_entry *)&error_log)->chid;
1026 if (unlikely(chid >= MAX_CHANNELS_PER_GPII)) {
1027 GPII_ERR(gpii, GPI_DBG_COMMON, "invalid chid reported:%u\n",
1028 chid);
1029 goto error_irq;
1030 }
1031
1032 gpii_chan = &gpii->gpii_chan[chid];
1033 client_info = &gpii_chan->client_info;
1034
1035 /* notify client with error log */
1036 msm_gpi_cb.cb_event = MSM_GPI_QUP_ERROR;
1037 msm_gpi_cb.error_log.routine = log_entry->routine;
1038 msm_gpi_cb.error_log.type = log_entry->type;
1039 msm_gpi_cb.error_log.error_code = log_entry->code;
1040 GPII_INFO(gpii, gpii_chan->chid, "sending CB event:%s\n",
1041 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1042 GPII_ERR(gpii, gpii_chan->chid,
1043 "ee:%u chtype:%u routine:%u type:%u error_code:%u\n",
1044 log_entry->ee, log_entry->chtype,
1045 msm_gpi_cb.error_log.routine,
1046 msm_gpi_cb.error_log.type,
1047 msm_gpi_cb.error_log.error_code);
1048 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1049 client_info->cb_param);
1050
1051 return;
1052
1053error_irq:
1054 for (chid = 0, gpii_chan = gpii->gpii_chan;
1055 chid < MAX_CHANNELS_PER_GPII; chid++, gpii_chan++)
1056 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_FW_ERROR,
1057 irq_stts);
1058}
1059
1060/* gpii interrupt handler */
1061static irqreturn_t gpi_handle_irq(int irq, void *data)
1062{
1063 struct gpii *gpii = data;
1064 u32 type;
1065 unsigned long flags;
1066 u32 offset;
1067 u32 gpii_id = gpii->gpii_id;
1068
1069 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1070
1071 read_lock_irqsave(&gpii->pm_lock, flags);
1072
	/*
	 * States are out of sync: an interrupt was received while the
	 * software state does not allow register access, bail out.
	 */
1077 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1078 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1079 "receive interrupt while in %s state\n",
1080 TO_GPI_PM_STR(gpii->pm_state));
1081 goto exit_irq;
1082 }
1083
1084 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1085 type = gpi_read_reg(gpii, gpii->regs + offset);
1086
1087 do {
1088 GPII_VERB(gpii, GPI_DBG_COMMON, "CNTXT_TYPE_IRQ:0x%08x\n",
1089 type);
1090 /* global gpii error */
1091 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB) {
1092 GPII_ERR(gpii, GPI_DBG_COMMON,
1093 "processing global error irq\n");
1094 gpi_process_glob_err_irq(gpii);
1095 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_GLOB);
1096 }
1097
1098 /* event control irq */
1099 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL) {
1100 u32 ev_state;
1101 u32 ev_ch_irq;
1102
1103 GPII_INFO(gpii, GPI_DBG_COMMON,
1104 "processing EV CTRL interrupt\n");
1105 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_OFFS(gpii_id);
1106 ev_ch_irq = gpi_read_reg(gpii, gpii->regs + offset);
1107
1108 offset = GPI_GPII_n_CNTXT_SRC_EV_CH_IRQ_CLR_OFFS
1109 (gpii_id);
1110 gpi_write_reg(gpii, gpii->regs + offset, ev_ch_irq);
1111 ev_state = gpi_read_reg(gpii, gpii->ev_cntxt_base_reg +
1112 CNTXT_0_CONFIG);
1113 ev_state &= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_BMSK;
1114 ev_state >>= GPI_GPII_n_EV_CH_k_CNTXT_0_CHSTATE_SHFT;
1115
			/*
			 * EV_CMD_DEALLOC always succeeds, but it does not
			 * change the hardware state, so overwrite the
			 * software state with the default state.
			 */
1121 if (gpii->gpi_cmd == GPI_EV_CMD_DEALLOC)
1122 ev_state = DEFAULT_EV_CH_STATE;
1123
1124 gpii->ev_state = ev_state;
1125 GPII_INFO(gpii, GPI_DBG_COMMON,
1126 "setting EV state to %s\n",
1127 TO_GPI_EV_STATE_STR(gpii->ev_state));
1128 complete_all(&gpii->cmd_completion);
1129 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_EV_CTRL);
1130 }
1131
1132 /* channel control irq */
1133 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL) {
1134 GPII_INFO(gpii, GPI_DBG_COMMON,
1135 "process CH CTRL interrupts\n");
1136 gpi_process_ch_ctrl_irq(gpii);
1137 type &= ~(GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_CH_CTRL);
1138 }
1139
1140 /* transfer complete interrupt */
1141 if (type & GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB) {
1142 GPII_VERB(gpii, GPI_DBG_COMMON,
1143 "process IEOB interrupts\n");
1144 gpi_process_ieob(gpii);
1145 type &= ~GPI_GPII_n_CNTXT_TYPE_IRQ_MSK_IEOB;
1146 }
1147
1148 if (type) {
1149 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1150 "Unhandled interrupt status:0x%x\n", type);
1151 goto exit_irq;
1152 }
1153 offset = GPI_GPII_n_CNTXT_TYPE_IRQ_OFFS(gpii->gpii_id);
1154 type = gpi_read_reg(gpii, gpii->regs + offset);
1155 } while (type);
1156
1157exit_irq:
1158 read_unlock_irqrestore(&gpii->pm_lock, flags);
1159 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1160
1161 return IRQ_HANDLED;
1162}
1163
1164/* process qup notification events */
1165static void gpi_process_qup_notif_event(struct gpii_chan *gpii_chan,
1166 struct qup_notif_event *notif_event)
1167{
1168 struct gpii *gpii = gpii_chan->gpii;
1169 struct gpi_client_info *client_info = &gpii_chan->client_info;
1170 struct msm_gpi_cb msm_gpi_cb;
1171
1172 GPII_VERB(gpii, gpii_chan->chid,
1173 "status:0x%x time:0x%x count:0x%x\n",
1174 notif_event->status, notif_event->time, notif_event->count);
1175
1176 msm_gpi_cb.cb_event = MSM_GPI_QUP_NOTIFY;
1177 msm_gpi_cb.status = notif_event->status;
1178 msm_gpi_cb.timestamp = notif_event->time;
1179 msm_gpi_cb.count = notif_event->count;
1180 GPII_VERB(gpii, gpii_chan->chid, "sending CB event:%s\n",
1181 TO_GPI_CB_EVENT_STR(msm_gpi_cb.cb_event));
1182 client_info->callback(&gpii_chan->vc.chan, &msm_gpi_cb,
1183 client_info->cb_param);
1184}
1185
1186/* process DMA Immediate completion data events */
1187static void gpi_process_imed_data_event(struct gpii_chan *gpii_chan,
1188 struct immediate_data_event *imed_event)
1189{
1190 struct gpii *gpii = gpii_chan->gpii;
1191 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1192 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1193 struct virt_dma_desc *vd;
1194 struct gpi_desc *gpi_desc;
1195 struct msm_gpi_tre *client_tre;
1196 void *sg_tre;
1197 void *tre = ch_ring->base +
1198 (ch_ring->el_size * imed_event->tre_index);
1199 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1200
	/*
	 * If the channel is not active, don't process the event; instead
	 * let the client know that a pending event is available.
	 */
1205 if (gpii_chan->pm_state != ACTIVE_STATE) {
1206 GPII_ERR(gpii, gpii_chan->chid,
1207 "skipping processing event because ch @ %s state\n",
1208 TO_GPI_PM_STR(gpii_chan->pm_state));
1209 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1210 __LINE__);
1211 return;
1212 }
1213
1214 spin_lock_irq(&gpii_chan->vc.lock);
1215 vd = vchan_next_desc(&gpii_chan->vc);
1216 if (!vd) {
1217 struct gpi_ere *gpi_ere;
1218 struct msm_gpi_tre *gpi_tre;
1219
1220 spin_unlock_irq(&gpii_chan->vc.lock);
1221 GPII_ERR(gpii, gpii_chan->chid,
1222 "event without a pending descriptor!\n");
1223 gpi_ere = (struct gpi_ere *)imed_event;
1224 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1225 gpi_ere->dword[0], gpi_ere->dword[1],
1226 gpi_ere->dword[2], gpi_ere->dword[3]);
1227 gpi_tre = tre;
1228 GPII_ERR(gpii, gpii_chan->chid,
1229 "Pending TRE: %08x %08x %08x %08x\n",
1230 gpi_tre->dword[0], gpi_tre->dword[1],
1231 gpi_tre->dword[2], gpi_tre->dword[3]);
1232 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1233 __LINE__);
1234 return;
1235 }
1236 gpi_desc = to_gpi_desc(vd);
1237
	/* the event's TRE doesn't match the descriptor's TRE */
1239 if (gpi_desc->wp != tre) {
1240 spin_unlock_irq(&gpii_chan->vc.lock);
1241 GPII_ERR(gpii, gpii_chan->chid,
1242 "EOT/EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1243 to_physical(ch_ring, gpi_desc->wp),
1244 to_physical(ch_ring, tre));
1245 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1246 __LINE__);
1247 return;
1248 }
1249
1250 list_del(&vd->node);
1251 spin_unlock_irq(&gpii_chan->vc.lock);
1252
1253 sg_tre = gpi_desc->sg_tre;
1254 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1255
	/*
	 * The RP reported by the event points to the last TRE processed,
	 * so update the ring rp to tre + 1.
	 */
1260 tre += ch_ring->el_size;
1261 if (tre >= (ch_ring->base + ch_ring->len))
1262 tre = ch_ring->base;
1263 ch_ring->rp = tre;
1264 sg_tre += sg_ring->el_size;
1265 if (sg_tre >= (sg_ring->base + sg_ring->len))
1266 sg_tre = sg_ring->base;
1267 sg_ring->rp = sg_tre;
1268
1269 /* make sure rp updates are immediately visible to all cores */
1270 smp_wmb();
1271
1272 /* update Immediate data from Event back in to TRE if it's RX channel */
1273 if (gpii_chan->dir == GPI_CHTYPE_DIR_IN) {
1274 client_tre->dword[0] =
1275 ((struct msm_gpi_tre *)imed_event)->dword[0];
1276 client_tre->dword[1] =
1277 ((struct msm_gpi_tre *)imed_event)->dword[1];
1278 client_tre->dword[2] = MSM_GPI_DMA_IMMEDIATE_TRE_DWORD2(
1279 imed_event->length);
1280 }
1281
1282 tx_cb_param = vd->tx.callback_param;
1283 if (tx_cb_param) {
1284 GPII_VERB(gpii, gpii_chan->chid,
1285 "cb_length:%u compl_code:0x%x status:0x%x\n",
1286 imed_event->length, imed_event->code,
1287 imed_event->status);
1288 tx_cb_param->length = imed_event->length;
1289 tx_cb_param->completion_code = imed_event->code;
1290 tx_cb_param->status = imed_event->status;
1291 }
1292
1293 spin_lock_irq(&gpii_chan->vc.lock);
1294 vchan_cookie_complete(vd);
1295 spin_unlock_irq(&gpii_chan->vc.lock);
1296}
1297
1298/* processing transfer completion events */
1299static void gpi_process_xfer_compl_event(struct gpii_chan *gpii_chan,
1300 struct xfer_compl_event *compl_event)
1301{
1302 struct gpii *gpii = gpii_chan->gpii;
1303 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1304 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1305 void *ev_rp = to_virtual(ch_ring, compl_event->ptr);
1306 struct msm_gpi_tre *client_tre;
1307 struct virt_dma_desc *vd;
1308 struct msm_gpi_dma_async_tx_cb_param *tx_cb_param;
1309 struct gpi_desc *gpi_desc;
1310 void *sg_tre = NULL;
1311
1312 /* only process events on active channel */
1313 if (unlikely(gpii_chan->pm_state != ACTIVE_STATE)) {
1314 GPII_ERR(gpii, gpii_chan->chid,
1315 "skipping processing event because ch @ %s state\n",
1316 TO_GPI_PM_STR(gpii_chan->pm_state));
1317 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_PENDING_EVENT,
1318 __LINE__);
1319 return;
1320 }
1321
1322 spin_lock_irq(&gpii_chan->vc.lock);
1323 vd = vchan_next_desc(&gpii_chan->vc);
1324 if (!vd) {
1325 struct gpi_ere *gpi_ere;
1326
1327 spin_unlock_irq(&gpii_chan->vc.lock);
1328 GPII_ERR(gpii, gpii_chan->chid,
1329 "Event without a pending descriptor!\n");
1330 gpi_ere = (struct gpi_ere *)compl_event;
1331 GPII_ERR(gpii, gpii_chan->chid, "Event: %08x %08x %08x %08x\n",
1332 gpi_ere->dword[0], gpi_ere->dword[1],
1333 gpi_ere->dword[2], gpi_ere->dword[3]);
1334 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1335 __LINE__);
1336 return;
1337 }
1338
1339 gpi_desc = to_gpi_desc(vd);
1340
	/* the event's TRE doesn't match the descriptor's TRE */
1342 if (gpi_desc->wp != ev_rp) {
1343 spin_unlock_irq(&gpii_chan->vc.lock);
1344 GPII_ERR(gpii, gpii_chan->chid,
1345 "EOT\EOB received for wrong TRE 0x%0llx != 0x%0llx\n",
1346 to_physical(ch_ring, gpi_desc->wp),
1347 to_physical(ch_ring, ev_rp));
1348 gpi_generate_cb_event(gpii_chan, MSM_GPI_QUP_EOT_DESC_MISMATCH,
1349 __LINE__);
1350 return;
1351 }
1352
1353 list_del(&vd->node);
1354 spin_unlock_irq(&gpii_chan->vc.lock);
1355
1356 sg_tre = gpi_desc->sg_tre;
1357 client_tre = ((struct sg_tre *)sg_tre)->ptr;
1358
	/*
	 * The RP reported by the event points to the last TRE processed,
	 * so update the ring rp to ev_rp + 1.
	 */
1363 ev_rp += ch_ring->el_size;
1364 if (ev_rp >= (ch_ring->base + ch_ring->len))
1365 ev_rp = ch_ring->base;
1366 ch_ring->rp = ev_rp;
1367 sg_tre += sg_ring->el_size;
1368 if (sg_tre >= (sg_ring->base + sg_ring->len))
1369 sg_tre = sg_ring->base;
1370 sg_ring->rp = sg_tre;
1371
1372 /* update must be visible to other cores */
1373 smp_wmb();
1374
1375 tx_cb_param = vd->tx.callback_param;
1376 if (tx_cb_param) {
1377 GPII_VERB(gpii, gpii_chan->chid,
1378 "cb_length:%u compl_code:0x%x status:0x%x\n",
1379 compl_event->length, compl_event->code,
1380 compl_event->status);
1381 tx_cb_param->length = compl_event->length;
1382 tx_cb_param->completion_code = compl_event->code;
1383 tx_cb_param->status = compl_event->status;
1384 }
1385
1386 spin_lock_irq(&gpii_chan->vc.lock);
1387 vchan_cookie_complete(vd);
1388 spin_unlock_irq(&gpii_chan->vc.lock);
1389}
1390
1391/* process all events */
1392static void gpi_process_events(struct gpii *gpii)
1393{
1394 struct gpi_ring *ev_ring = &gpii->ev_ring;
1395 u32 cntxt_rp, local_rp;
1396 union gpi_event *gpi_event;
1397 struct gpii_chan *gpii_chan;
1398 u32 chid, type;
1399 u32 ieob_irq;
1400
1401 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1402 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1403
	GPII_VERB(gpii, GPI_DBG_COMMON, "cntxt_rp:0x%08x local_rp:0x%08x\n",
1405 cntxt_rp, local_rp);
1406
1407 do {
1408 while (local_rp != cntxt_rp) {
1409 gpi_event = ev_ring->rp;
1410 chid = gpi_event->xfer_compl_event.chid;
1411 type = gpi_event->xfer_compl_event.type;
1412 GPII_VERB(gpii, GPI_DBG_COMMON,
1413 "rp:0x%08x chid:%u type:0x%x %08x %08x %08x %08x\n",
1414 local_rp, chid, type,
1415 gpi_event->gpi_ere.dword[0],
1416 gpi_event->gpi_ere.dword[1],
1417 gpi_event->gpi_ere.dword[2],
1418 gpi_event->gpi_ere.dword[3]);
1419
1420 switch (type) {
1421 case XFER_COMPLETE_EV_TYPE:
1422 gpii_chan = &gpii->gpii_chan[chid];
1423 gpi_process_xfer_compl_event(gpii_chan,
1424 &gpi_event->xfer_compl_event);
1425 break;
1426 case STALE_EV_TYPE:
1427 GPII_VERB(gpii, GPI_DBG_COMMON,
1428 "stale event, not processing\n");
1429 break;
1430 case IMMEDIATE_DATA_EV_TYPE:
1431 gpii_chan = &gpii->gpii_chan[chid];
1432 gpi_process_imed_data_event(gpii_chan,
1433 &gpi_event->immediate_data_event);
1434 break;
1435 case QUP_NOTIF_EV_TYPE:
1436 gpii_chan = &gpii->gpii_chan[chid];
1437 gpi_process_qup_notif_event(gpii_chan,
1438 &gpi_event->qup_notif_event);
1439 break;
1440 default:
1441 GPII_VERB(gpii, GPI_DBG_COMMON,
1442 "not supported event type:0x%x\n",
1443 type);
1444 }
1445 gpi_ring_recycle_ev_element(ev_ring);
1446 local_rp = (u32)to_physical(ev_ring,
1447 (void *)ev_ring->rp);
1448 }
1449 gpi_write_ev_db(gpii, ev_ring, ev_ring->wp);
1450
1451 /* clear pending IEOB events */
1452 ieob_irq = gpi_read_reg(gpii, gpii->ieob_src_reg);
1453 gpi_write_reg(gpii, gpii->ieob_clr_reg, ieob_irq);
1454
1455 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1456 local_rp = (u32)to_physical(ev_ring, (void *)ev_ring->rp);
1457
1458 } while (cntxt_rp != local_rp);
1459
1460 GPII_VERB(gpii, GPI_DBG_COMMON, "exit: c_rp:0x%x l_rp:0x%x\n", cntxt_rp,
1461 local_rp);
1462}
1463
1464/* processing events using tasklet */
1465static void gpi_ev_tasklet(unsigned long data)
1466{
1467 struct gpii *gpii = (struct gpii *)data;
1468
1469 GPII_VERB(gpii, GPI_DBG_COMMON, "enter\n");
1470
1471 read_lock_bh(&gpii->pm_lock);
1472 if (!REG_ACCESS_VALID(gpii->pm_state)) {
1473 read_unlock_bh(&gpii->pm_lock);
1474 GPII_ERR(gpii, GPI_DBG_COMMON,
1475 "not processing any events, pm_state:%s\n",
1476 TO_GPI_PM_STR(gpii->pm_state));
1477 return;
1478 }
1479
1480 /* process the events */
1481 gpi_process_events(gpii);
1482
1483 /* enable IEOB, switching back to interrupts */
1484 gpi_config_interrupts(gpii, MASK_IEOB_SETTINGS, 1);
1485 read_unlock_bh(&gpii->pm_lock);
1486
1487 GPII_VERB(gpii, GPI_DBG_COMMON, "exit\n");
1488}
1489
1490/* marks all pending events for the channel as stale */
1491void gpi_mark_stale_events(struct gpii_chan *gpii_chan)
1492{
1493 struct gpii *gpii = gpii_chan->gpii;
1494 struct gpi_ring *ev_ring = &gpii->ev_ring;
1495 void *ev_rp;
1496 u32 cntxt_rp, local_rp;
1497
1498 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1499 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1500
1501 ev_rp = ev_ring->rp;
1502 local_rp = (u32)to_physical(ev_ring, ev_rp);
1503 while (local_rp != cntxt_rp) {
1504 union gpi_event *gpi_event = ev_rp;
1505 u32 chid = gpi_event->xfer_compl_event.chid;
1506
1507 if (chid == gpii_chan->chid)
1508 gpi_event->xfer_compl_event.type = STALE_EV_TYPE;
1509 ev_rp += ev_ring->el_size;
1510 if (ev_rp >= (ev_ring->base + ev_ring->len))
1511 ev_rp = ev_ring->base;
1512 cntxt_rp = gpi_read_reg(gpii, gpii->ev_ring_rp_lsb_reg);
1513 local_rp = (u32)to_physical(ev_ring, ev_rp);
1514 }
1515}
1516
1517/* reset sw state and issue channel reset or de-alloc */
1518static int gpi_reset_chan(struct gpii_chan *gpii_chan, enum gpi_cmd gpi_cmd)
1519{
1520 struct gpii *gpii = gpii_chan->gpii;
1521 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
1522 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
1523 unsigned long flags;
1524 LIST_HEAD(list);
1525 int ret;
1526
1527 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1528 ret = gpi_send_cmd(gpii, gpii_chan, gpi_cmd);
1529 if (ret) {
1530 GPII_ERR(gpii, gpii_chan->chid,
1531 "Error with cmd:%s ret:%d\n",
1532 TO_GPI_CMD_STR(gpi_cmd), ret);
1533 return ret;
1534 }
1535
1536 /* initialize the local ring ptrs */
1537 ch_ring->rp = ch_ring->base;
1538 ch_ring->wp = ch_ring->base;
1539 sg_ring->rp = sg_ring->base;
1540 sg_ring->wp = sg_ring->base;
1541
1542 /* visible to other cores */
1543 smp_wmb();
1544
1545 /* check event ring for any stale events */
1546 write_lock_irq(&gpii->pm_lock);
1547 gpi_mark_stale_events(gpii_chan);
1548
1549 /* remove all async descriptors */
1550 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
1551 vchan_get_all_descriptors(&gpii_chan->vc, &list);
1552 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
1553 write_unlock_irq(&gpii->pm_lock);
1554 vchan_dma_desc_free_list(&gpii_chan->vc, &list);
1555
1556 return 0;
1557}
1558
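/* send the START command and mark the channel active */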
1559static int gpi_start_chan(struct gpii_chan *gpii_chan)
1560{
1561 struct gpii *gpii = gpii_chan->gpii;
1562 int ret;
1563
1564 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1565
1566 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_START);
1567 if (ret) {
1568 GPII_ERR(gpii, gpii_chan->chid,
1569 "Error with cmd:%s ret:%d\n",
1570 TO_GPI_CMD_STR(GPI_CH_CMD_START), ret);
1571 return ret;
1572 }
1573
1574 /* gpii CH is active now */
1575 write_lock_irq(&gpii->pm_lock);
1576 gpii_chan->pm_state = ACTIVE_STATE;
1577 write_unlock_irq(&gpii->pm_lock);
1578
1579 return 0;
1580}
1581
1582/* allocate and configure the transfer channel */
1583static int gpi_alloc_chan(struct gpii_chan *gpii_chan, bool send_alloc_cmd)
1584{
1585 struct gpii *gpii = gpii_chan->gpii;
1586 struct gpi_ring *ring = &gpii_chan->ch_ring;
1587 int i;
1588 int ret;
1589 struct {
1590 void *base;
1591 int offset;
1592 u32 val;
1593 } ch_reg[] = {
1594 {
1595 gpii_chan->ch_cntxt_base_reg,
1596 CNTXT_0_CONFIG,
1597 GPI_GPII_n_CH_k_CNTXT_0(ring->el_size, 0,
1598 gpii_chan->dir,
1599 GPI_CHTYPE_PROTO_GPI),
1600 },
1601 {
1602 gpii_chan->ch_cntxt_base_reg,
1603 CNTXT_1_R_LENGTH,
1604 ring->len,
1605 },
1606 {
1607 gpii_chan->ch_cntxt_base_reg,
1608 CNTXT_2_RING_BASE_LSB,
1609 (u32)ring->phys_addr,
1610 },
1611 {
1612 gpii_chan->ch_cntxt_base_reg,
1613 CNTXT_3_RING_BASE_MSB,
1614 (u32)(ring->phys_addr >> 32),
1615 },
1616 { /* program MSB of DB register with ring base */
1617 gpii_chan->ch_cntxt_db_reg,
1618 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1619 (u32)(ring->phys_addr >> 32),
1620 },
1621 {
1622 gpii->regs,
1623 GPI_GPII_n_CH_k_SCRATCH_0_OFFS(gpii->gpii_id,
1624 gpii_chan->chid),
1625 GPI_GPII_n_CH_K_SCRATCH_0(!gpii_chan->chid,
1626 gpii_chan->protocol,
1627 gpii_chan->seid),
1628 },
1629 {
1630 gpii->regs,
1631 GPI_GPII_n_CH_k_SCRATCH_1_OFFS(gpii->gpii_id,
1632 gpii_chan->chid),
1633 0,
1634 },
1635 {
1636 gpii->regs,
1637 GPI_GPII_n_CH_k_SCRATCH_2_OFFS(gpii->gpii_id,
1638 gpii_chan->chid),
1639 0,
1640 },
1641 {
1642 gpii->regs,
1643 GPI_GPII_n_CH_k_SCRATCH_3_OFFS(gpii->gpii_id,
1644 gpii_chan->chid),
1645 0,
1646 },
1647 {
1648 gpii->regs,
1649 GPI_GPII_n_CH_k_QOS_OFFS(gpii->gpii_id,
1650 gpii_chan->chid),
1651 1,
1652 },
1653 { NULL },
1654 };
1655
1656 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1657
1658 if (send_alloc_cmd) {
1659 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_ALLOCATE);
1660 if (ret) {
1661 GPII_ERR(gpii, gpii_chan->chid,
1662 "Error with cmd:%s ret:%d\n",
1663 TO_GPI_CMD_STR(GPI_CH_CMD_ALLOCATE), ret);
1664 return ret;
1665 }
1666 }
1667
1668 /* program channel cntxt registers */
1669 for (i = 0; ch_reg[i].base; i++)
1670 gpi_write_reg(gpii, ch_reg[i].base + ch_reg[i].offset,
1671 ch_reg[i].val);
1672 /* flush all the writes */
1673 wmb();
1674 return 0;
1675}
1676
1677/* allocate and configure event ring */
1678static int gpi_alloc_ev_chan(struct gpii *gpii)
1679{
1680 struct gpi_ring *ring = &gpii->ev_ring;
1681 int i;
1682 int ret;
1683 struct {
1684 void *base;
1685 int offset;
1686 u32 val;
1687 } ev_reg[] = {
1688 {
1689 gpii->ev_cntxt_base_reg,
1690 CNTXT_0_CONFIG,
1691 GPI_GPII_n_EV_CH_k_CNTXT_0(ring->el_size,
1692 GPI_INTTYPE_IRQ,
1693 GPI_CHTYPE_GPI_EV),
1694 },
1695 {
1696 gpii->ev_cntxt_base_reg,
1697 CNTXT_1_R_LENGTH,
1698 ring->len,
1699 },
1700 {
1701 gpii->ev_cntxt_base_reg,
1702 CNTXT_2_RING_BASE_LSB,
1703 (u32)ring->phys_addr,
1704 },
1705 {
1706 gpii->ev_cntxt_base_reg,
1707 CNTXT_3_RING_BASE_MSB,
1708 (u32)(ring->phys_addr >> 32),
1709 },
1710 {
1711 /* program db msg with ring base msb */
1712 gpii->ev_cntxt_db_reg,
1713 CNTXT_5_RING_RP_MSB - CNTXT_4_RING_RP_LSB,
1714 (u32)(ring->phys_addr >> 32),
1715 },
1716 {
1717 gpii->ev_cntxt_base_reg,
1718 CNTXT_8_RING_INT_MOD,
1719 0,
1720 },
1721 {
1722 gpii->ev_cntxt_base_reg,
1723 CNTXT_10_RING_MSI_LSB,
1724 0,
1725 },
1726 {
1727 gpii->ev_cntxt_base_reg,
1728 CNTXT_11_RING_MSI_MSB,
1729 0,
1730 },
1731 {
1732 gpii->ev_cntxt_base_reg,
1733 CNTXT_8_RING_INT_MOD,
1734 0,
1735 },
1736 {
1737 gpii->ev_cntxt_base_reg,
1738 CNTXT_12_RING_RP_UPDATE_LSB,
1739 0,
1740 },
1741 {
1742 gpii->ev_cntxt_base_reg,
1743 CNTXT_13_RING_RP_UPDATE_MSB,
1744 0,
1745 },
1746 { NULL },
1747 };
1748
1749 GPII_INFO(gpii, GPI_DBG_COMMON, "enter\n");
1750
1751 ret = gpi_send_cmd(gpii, NULL, GPI_EV_CMD_ALLOCATE);
1752 if (ret) {
1753 GPII_ERR(gpii, GPI_DBG_COMMON, "error with cmd:%s ret:%d\n",
1754 TO_GPI_CMD_STR(GPI_EV_CMD_ALLOCATE), ret);
1755 return ret;
1756 }
1757
1758 /* program event context */
1759 for (i = 0; ev_reg[i].base; i++)
1760 gpi_write_reg(gpii, ev_reg[i].base + ev_reg[i].offset,
1761 ev_reg[i].val);
1762
1763 /* add events to ring */
1764 ring->wp = (ring->base + ring->len - ring->el_size);
1765
1766 /* flush all the writes */
1767 wmb();
1768
1769 /* gpii is active now */
1770 write_lock_irq(&gpii->pm_lock);
1771 gpii->pm_state = ACTIVE_STATE;
1772 write_unlock_irq(&gpii->pm_lock);
1773 gpi_write_ev_db(gpii, ring, ring->wp);
1774
1775 return 0;
1776}
1777
1778/* calculate # of ERE/TRE available to queue */
1779static int gpi_ring_num_elements_avail(const struct gpi_ring * const ring)
1780{
1781 int elements = 0;
1782
1783 if (ring->wp < ring->rp)
1784 elements = ((ring->rp - ring->wp) / ring->el_size) - 1;
1785 else {
1786 elements = (ring->rp - ring->base) / ring->el_size;
1787 elements += ((ring->base + ring->len - ring->wp) /
1788 ring->el_size) - 1;
1789 }
1790
1791 return elements;
1792}
1793
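/* reserve the next free ring element and advance the local write pointer */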
1794static int gpi_ring_add_element(struct gpi_ring *ring, void **wp)
1795{
1796
1797 if (gpi_ring_num_elements_avail(ring) <= 0)
1798 return -ENOMEM;
1799
1800 *wp = ring->wp;
1801 ring->wp += ring->el_size;
1802 if (ring->wp >= (ring->base + ring->len))
1803 ring->wp = ring->base;
1804
1805 /* visible to other cores */
1806 smp_wmb();
1807
1808 return 0;
1809}
1810
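/* return a processed event element to hardware by advancing both rp and wp */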
1811static void gpi_ring_recycle_ev_element(struct gpi_ring *ring)
1812{
1813 /* Update the WP */
1814 ring->wp += ring->el_size;
1815 if (ring->wp >= (ring->base + ring->len))
1816 ring->wp = ring->base;
1817
1818 /* Update the RP */
1819 ring->rp += ring->el_size;
1820 if (ring->rp >= (ring->base + ring->len))
1821 ring->rp = ring->base;
1822
1823 /* visible to other cores */
1824 smp_wmb();
1825}
1826
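/* free ring memory allocated by gpi_alloc_ring */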
1827static void gpi_free_ring(struct gpi_ring *ring,
1828 struct gpii *gpii)
1829{
1830 if (ring->dma_handle)
1831 dma_free_coherent(gpii->gpi_dev->dev, ring->alloc_size,
1832 ring->pre_aligned, ring->dma_handle);
1833 else
1834 vfree(ring->pre_aligned);
1835 memset(ring, 0, sizeof(*ring));
1836}
1837
1838/* allocate memory for transfer and event rings */
1839static int gpi_alloc_ring(struct gpi_ring *ring,
1840 u32 elements,
1841 u32 el_size,
1842 struct gpii *gpii,
1843 bool alloc_coherent)
1844{
1845 u64 len = elements * el_size;
1846 int bit;
1847
1848 if (alloc_coherent) {
1849 /* ring len must be power of 2 */
1850 bit = find_last_bit((unsigned long *)&len, 32);
1851 if (((1 << bit) - 1) & len)
1852 bit++;
1853 len = 1 << bit;
1854 ring->alloc_size = (len + (len - 1));
1855 GPII_INFO(gpii, GPI_DBG_COMMON,
1856 "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
1857 elements, el_size, (elements * el_size), len,
1858 ring->alloc_size);
1859 ring->pre_aligned = dma_alloc_coherent(gpii->gpi_dev->dev,
1860 ring->alloc_size,
1861 &ring->dma_handle,
1862 GFP_KERNEL);
1863 if (!ring->pre_aligned) {
1864 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1865 "could not alloc size:%lu mem for ring\n",
1866 ring->alloc_size);
1867 return -ENOMEM;
1868 }
1869
1870 /* align the physical mem */
1871 ring->phys_addr = (ring->dma_handle + (len - 1)) & ~(len - 1);
1872 ring->base = ring->pre_aligned +
1873 (ring->phys_addr - ring->dma_handle);
1874 } else {
1875 ring->pre_aligned = vmalloc(len);
1876 if (!ring->pre_aligned) {
1877 GPII_CRITIC(gpii, GPI_DBG_COMMON,
1878 "could not allocsize:%llu mem for ring\n",
1879 len);
1880 return -ENOMEM;
1881 }
1882 ring->phys_addr = 0;
1883 ring->dma_handle = 0;
1884 ring->base = ring->pre_aligned;
1885 }
1886
1887 ring->rp = ring->base;
1888 ring->wp = ring->base;
1889 ring->len = len;
1890 ring->el_size = el_size;
1891 ring->elements = ring->len / ring->el_size;
1892 memset(ring->base, 0, ring->len);
1893 ring->configured = true;
1894
1895 /* update to other cores */
1896 smp_wmb();
1897
1898 GPII_INFO(gpii, GPI_DBG_COMMON,
1899 "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
1900 ring->dma_handle, ring->phys_addr, ring->len, ring->el_size,
1901 ring->elements);
1902
1903 return 0;
1904}
1905
1906/* copy tre into transfer ring */
1907static void gpi_queue_xfer(struct gpii *gpii,
1908 struct gpii_chan *gpii_chan,
1909 struct msm_gpi_tre *gpi_tre,
1910 void **wp,
1911 struct sg_tre **sg_tre)
1912{
1913 struct msm_gpi_tre *ch_tre;
1914 int ret;
1915
1916 /* get next tre location we can copy */
1917 ret = gpi_ring_add_element(&gpii_chan->ch_ring, (void **)&ch_tre);
1918 if (unlikely(ret)) {
1919 GPII_CRITIC(gpii, gpii_chan->chid,
1920 "Error adding ring element to xfer ring\n");
1921 return;
1922 }
1923 /* get next sg tre location we can use */
1924 ret = gpi_ring_add_element(&gpii_chan->sg_ring, (void **)sg_tre);
1925 if (unlikely(ret)) {
1926 GPII_CRITIC(gpii, gpii_chan->chid,
1927 "Error adding ring element to sg ring\n");
1928 return;
1929 }
1930
1931 /* copy the tre info */
1932 memcpy(ch_tre, gpi_tre, sizeof(*ch_tre));
1933 (*sg_tre)->ptr = gpi_tre;
1934 (*sg_tre)->wp = ch_tre;
1935 *wp = ch_tre;
1936}
1937
1938/* reset and restart transfer channel */
1939int gpi_terminate_all(struct dma_chan *chan)
1940{
1941 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
1942 struct gpii *gpii = gpii_chan->gpii;
1943 int schid, echid, i;
1944 int ret = 0;
1945
1946 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
1947 mutex_lock(&gpii->ctrl_lock);
1948
	/*
	 * Unless the protocol is UART, treat both channels as a group:
	 * STOP, RESET, and START must be issued in lockstep.
	 */
1953 schid = (gpii->protocol == SE_PROTOCOL_UART) ? gpii_chan->chid : 0;
1954 echid = (gpii->protocol == SE_PROTOCOL_UART) ? schid + 1 :
1955 MAX_CHANNELS_PER_GPII;
1956
1957 /* stop the channel */
1958 for (i = schid; i < echid; i++) {
1959 gpii_chan = &gpii->gpii_chan[i];
1960
1961 /* disable ch state so no more TRE processing */
1962 write_lock_irq(&gpii->pm_lock);
1963 gpii_chan->pm_state = PREPARE_TERMINATE;
1964 write_unlock_irq(&gpii->pm_lock);
1965
1966 /* send command to Stop the channel */
1967 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
1968 if (ret)
1969 GPII_ERR(gpii, gpii_chan->chid,
1970 "Error Stopping Channel:%d resetting anyway\n",
1971 ret);
1972 }
1973
1974 /* reset the channels (clears any pending tre) */
1975 for (i = schid; i < echid; i++) {
1976 gpii_chan = &gpii->gpii_chan[i];
1977
1978 ret = gpi_reset_chan(gpii_chan, GPI_CH_CMD_RESET);
1979 if (ret) {
1980 GPII_ERR(gpii, gpii_chan->chid,
1981 "Error resetting channel ret:%d\n", ret);
1982 goto terminate_exit;
1983 }
1984
1985 /* reprogram channel CNTXT */
1986 ret = gpi_alloc_chan(gpii_chan, false);
1987 if (ret) {
1988 GPII_ERR(gpii, gpii_chan->chid,
1989 "Error alloc_channel ret:%d\n", ret);
1990 goto terminate_exit;
1991 }
1992 }
1993
1994 /* restart the channels */
1995 for (i = schid; i < echid; i++) {
1996 gpii_chan = &gpii->gpii_chan[i];
1997
1998 ret = gpi_start_chan(gpii_chan);
1999 if (ret) {
2000 GPII_ERR(gpii, gpii_chan->chid,
2001 "Error Starting Channel ret:%d\n", ret);
2002 goto terminate_exit;
2003 }
2004 }
2005
2006terminate_exit:
2007 mutex_unlock(&gpii->ctrl_lock);
2008 return ret;
2009}
2010
2011/* pause dma transfer for all channels */
2012static int gpi_pause(struct dma_chan *chan)
2013{
2014 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2015 struct gpii *gpii = gpii_chan->gpii;
2016 int i, ret;
2017
2018 GPII_INFO(gpii, gpii_chan->chid, "Enter\n");
2019 mutex_lock(&gpii->ctrl_lock);
2020
2021 /*
2022 * pause/resume are per gpii not per channel, so
2023 * client needs to call pause only once
2024 */
2025 if (gpii->pm_state == PAUSE_STATE) {
2026 GPII_INFO(gpii, gpii_chan->chid,
2027 "channel is already paused\n");
2028 mutex_unlock(&gpii->ctrl_lock);
2029 return 0;
2030 }
2031
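 /*
 * Stopping the channels leaves the transfer rings intact, so
 * gpi_resume() only needs to re-enable the IRQ and restart the
 * channels; no descriptors are discarded here.
 */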
2032 /* send stop command to stop the channels */
2033 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2034 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2035 if (ret) {
2036 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2037 "Error stopping chan, ret:%d\n", ret);
2038 mutex_unlock(&gpii->ctrl_lock);
2039 return ret;
2040 }
2041 }
2042
2043 disable_irq(gpii->irq);
2044
2045 /* wait for the event processing tasklet to finish */
2046 tasklet_kill(&gpii->ev_task);
2047
2048 write_lock_irq(&gpii->pm_lock);
2049 gpii->pm_state = PAUSE_STATE;
2050 write_unlock_irq(&gpii->pm_lock);
2051 mutex_unlock(&gpii->ctrl_lock);
2052
2053 return 0;
2054}
2055
2056/* resume dma transfer */
2057static int gpi_resume(struct dma_chan *chan)
2058{
2059 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2060 struct gpii *gpii = gpii_chan->gpii;
2061 int i;
2062 int ret;
2063
2064 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2065
2066 mutex_lock(&gpii->ctrl_lock);
2067 if (gpii->pm_state == ACTIVE_STATE) {
2068 GPII_INFO(gpii, gpii_chan->chid,
2069 "channel is already active\n");
2070 mutex_unlock(&gpii->ctrl_lock);
2071 return 0;
2072 }
2073
2074 enable_irq(gpii->irq);
2075
2076 /* send start command to start the channels */
2077 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2078 ret = gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_START);
2079 if (ret) {
2080 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2081 "Erro starting chan, ret:%d\n", ret);
2082 mutex_unlock(&gpii->ctrl_lock);
2083 return ret;
2084 }
2085 }
2086
2087 write_lock_irq(&gpii->pm_lock);
2088 gpii->pm_state = ACTIVE_STATE;
2089 write_unlock_irq(&gpii->pm_lock);
2090 mutex_unlock(&gpii->ctrl_lock);
2091
2092 return 0;
2093}
2094
2095void gpi_desc_free(struct virt_dma_desc *vd)
2096{
2097 struct gpi_desc *gpi_desc = to_gpi_desc(vd);
2098
2099 kfree(gpi_desc);
2100}
2101
2102/* copy client-built TREs into the transfer ring and create a descriptor */
2103struct dma_async_tx_descriptor *gpi_prep_slave_sg(struct dma_chan *chan,
2104 struct scatterlist *sgl,
2105 unsigned int sg_len,
2106 enum dma_transfer_direction direction,
2107 unsigned long flags,
2108 void *context)
2109{
2110 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2111 struct gpii *gpii = gpii_chan->gpii;
2112 u32 nr, sg_nr;
2113 u32 nr_req = 0;
2114 int i, j;
2115 struct scatterlist *sg;
2116 struct gpi_ring *ch_ring = &gpii_chan->ch_ring;
2117 struct gpi_ring *sg_ring = &gpii_chan->sg_ring;
2118 void *tre, *wp = NULL;
2119 struct sg_tre *sg_tre = NULL;
2120 const gfp_t gfp = GFP_ATOMIC;
2121 struct gpi_desc *gpi_desc;
2122
2123 GPII_VERB(gpii, gpii_chan->chid, "enter\n");
2124
2125 if (!is_slave_direction(direction)) {
2126 GPII_ERR(gpii, gpii_chan->chid,
2127 "invalid dma direction: %d\n", direction);
2128 return NULL;
2129 }
2130
2131 /* calculate # of elements required & available */
2132 nr = gpi_ring_num_elements_avail(ch_ring);
2133 sg_nr = gpi_ring_num_elements_avail(sg_ring);
2134 for_each_sg(sgl, sg, sg_len, i) {
2135 GPII_VERB(gpii, gpii_chan->chid,
2136 "%d of %u len:%u\n", i, sg_len, sg->length);
2137 nr_req += (sg->length / ch_ring->el_size);
2138 }
2139 GPII_VERB(gpii, gpii_chan->chid,
2140 "nr_elements_avail:%u sg_avail:%u required:%u\n",
2141 nr, sg_nr, nr_req);
2142
2143 if (nr < nr_req || sg_nr < nr_req) {
2144 GPII_ERR(gpii, gpii_chan->chid,
2145 "not enough space in ring, avail:%u,%u required:%u\n",
2146 nr, sg_nr, nr_req);
2147 return NULL;
2148 }
2149
2150 gpi_desc = kzalloc(sizeof(*gpi_desc), gfp);
2151 if (!gpi_desc) {
2152 GPII_ERR(gpii, gpii_chan->chid,
2153 "out of memory for descriptor\n");
2154 return NULL;
2155 }
2156
2157 /* copy each tre into transfer ring */
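 /*
 * Each scatterlist entry is expected to already contain TREs formatted by
 * the GPI client; they are copied verbatim, el_size bytes at a time, into
 * the channel transfer ring.
 */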
2158 for_each_sg(sgl, sg, sg_len, i)
2159 for (j = 0, tre = sg_virt(sg); j < sg->length;
2160 j += ch_ring->el_size, tre += ch_ring->el_size)
2161 gpi_queue_xfer(gpii, gpii_chan, tre, &wp, &sg_tre);
2162
2163 /* set up the descriptor */
2164 gpi_desc->db = ch_ring->wp;
2165 gpi_desc->wp = wp;
2166 gpi_desc->sg_tre = sg_tre;
2167 gpi_desc->gpii_chan = gpii_chan;
2168 GPII_VERB(gpii, gpii_chan->chid, "exit wp:0x%0llx rp:0x%0llx\n",
2169 to_physical(ch_ring, ch_ring->wp),
2170 to_physical(ch_ring, ch_ring->rp));
2171
2172 return vchan_tx_prep(&gpii_chan->vc, &gpi_desc->vd, flags);
2173}
2174
2175/* ring the transfer ring doorbell to begin the transfer */
2176static void gpi_issue_pending(struct dma_chan *chan)
2177{
2178 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2179 struct gpii *gpii = gpii_chan->gpii;
2180 unsigned long flags, pm_lock_flags;
2181 struct virt_dma_desc *vd = NULL;
2182 struct gpi_desc *gpi_desc;
2183
2184 GPII_VERB(gpii, gpii_chan->chid, "Enter\n");
2185
2186 read_lock_irqsave(&gpii->pm_lock, pm_lock_flags);
2187
2188 /* move all submitted descriptors to issued list */
2189 spin_lock_irqsave(&gpii_chan->vc.lock, flags);
2190 if (vchan_issue_pending(&gpii_chan->vc))
2191 vd = list_last_entry(&gpii_chan->vc.desc_issued,
2192 struct virt_dma_desc, node);
2193 spin_unlock_irqrestore(&gpii_chan->vc.lock, flags);
2194
2195 /* nothing to do, issued list is empty */
2196 if (!vd) {
2197 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2198 GPII_VERB(gpii, gpii_chan->chid, "no descriptors submitted\n");
2199 return;
2200 }
2201
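 /*
 * Writing the doorbell with the newest descriptor's db value also covers
 * every TRE queued before it, so only the last issued descriptor needs to
 * be looked at here.
 */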
2202 gpi_desc = to_gpi_desc(vd);
2203 gpi_write_ch_db(gpii_chan, &gpii_chan->ch_ring, gpi_desc->db);
2204 read_unlock_irqrestore(&gpii->pm_lock, pm_lock_flags);
2205}
2206
2207/* configure or issue async command */
2208static int gpi_config(struct dma_chan *chan,
2209 struct dma_slave_config *config)
2210{
2211 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2212 struct gpii *gpii = gpii_chan->gpii;
2213 struct msm_gpi_ctrl *gpi_ctrl = chan->private;
2214 const int ev_factor = gpii->gpi_dev->ev_factor;
2215 u32 elements;
2216 int i = 0;
2217 int ret = 0;
2218
2219 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2220 if (!gpi_ctrl) {
2221 GPII_ERR(gpii, gpii_chan->chid,
2222 "no config ctrl data provided");
2223 return -EINVAL;
2224 }
2225
2226 mutex_lock(&gpii->ctrl_lock);
2227
2228 switch (gpi_ctrl->cmd) {
2229 case MSM_GPI_INIT:
2230 GPII_INFO(gpii, gpii_chan->chid, "cmd: msm_gpi_init\n");
2231
2232 gpii_chan->client_info.callback = gpi_ctrl->init.callback;
2233 gpii_chan->client_info.cb_param = gpi_ctrl->init.cb_param;
2234 gpii_chan->pm_state = CONFIG_STATE;
2235
2236 /* check if both channels are configured before continuing */
2237 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2238 if (gpii->gpii_chan[i].pm_state != CONFIG_STATE)
2239 goto exit_gpi_init;
2240
2241 /* configure to the highest priority of the two channels */
2242 gpii->ev_priority = min(gpii->gpii_chan[0].priority,
2243 gpii->gpii_chan[1].priority);
2244
2245 /* protocol must be same for both channels */
2246 if (gpii->gpii_chan[0].protocol !=
2247 gpii->gpii_chan[1].protocol) {
2248 GPII_ERR(gpii, gpii_chan->chid,
2249 "protocol did not match protocol %u != %u\n",
2250 gpii->gpii_chan[0].protocol,
2251 gpii->gpii_chan[1].protocol);
2252 ret = -EINVAL;
2253 goto exit_gpi_init;
2254 }
2255 gpii->protocol = gpii_chan->protocol;
2256
2257 /* allocate memory for event ring */
2258 elements = max(gpii->gpii_chan[0].req_tres,
2259 gpii->gpii_chan[1].req_tres);
2260 ret = gpi_alloc_ring(&gpii->ev_ring, elements << ev_factor,
2261 sizeof(union gpi_event), gpii, true);
2262 if (ret) {
2263 GPII_ERR(gpii, gpii_chan->chid,
2264 "error allocating mem for ev ring\n");
2265 goto exit_gpi_init;
2266 }
2267
2268 /* configure interrupts */
2269 write_lock_irq(&gpii->pm_lock);
2270 gpii->pm_state = PREPARE_HARDWARE;
2271 write_unlock_irq(&gpii->pm_lock);
2272 ret = gpi_config_interrupts(gpii, DEFAULT_IRQ_SETTINGS, 0);
2273 if (ret) {
2274 GPII_ERR(gpii, gpii_chan->chid,
2275 "error config. interrupts, ret:%d\n", ret);
2276 goto error_config_int;
2277 }
2278
2279 /* allocate event rings */
2280 ret = gpi_alloc_ev_chan(gpii);
2281 if (ret) {
2282 GPII_ERR(gpii, gpii_chan->chid,
2283 "error alloc_ev_chan:%d\n", ret);
2284 goto error_alloc_ev_ring;
2285 }
2286
2287 /* Allocate all channels */
2288 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2289 ret = gpi_alloc_chan(&gpii->gpii_chan[i], true);
2290 if (ret) {
2291 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2292 "Error allocating chan:%d\n", ret);
2293 goto error_alloc_chan;
2294 }
2295 }
2296
2297 /* start channels */
2298 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++) {
2299 ret = gpi_start_chan(&gpii->gpii_chan[i]);
2300 if (ret) {
2301 GPII_ERR(gpii, gpii->gpii_chan[i].chid,
2302 "Error start chan:%d\n", ret);
2303 goto error_start_chan;
2304 }
2305 }
2306
2307 break;
2308 case MSM_GPI_CMD_UART_SW_STALE:
2309 GPII_INFO(gpii, gpii_chan->chid, "sending UART SW STALE cmd\n");
2310 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_SW_STALE);
2311 break;
2312 case MSM_GPI_CMD_UART_RFR_READY:
2313 GPII_INFO(gpii, gpii_chan->chid,
2314 "sending UART RFR READY cmd\n");
2315 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_UART_RFR_READY);
2316 break;
2317 case MSM_GPI_CMD_UART_RFR_NOT_READY:
2318 GPII_INFO(gpii, gpii_chan->chid,
2319 "sending UART RFR READY NOT READY cmd\n");
2320 ret = gpi_send_cmd(gpii, gpii_chan,
2321 GPI_CH_CMD_UART_RFR_NOT_READY);
2322 break;
2323 default:
2324 GPII_ERR(gpii, gpii_chan->chid,
2325 "unsupported ctrl cmd:%d\n", gpi_ctrl->cmd);
2326 ret = -EINVAL;
2327 }
2328
2329 mutex_unlock(&gpii->ctrl_lock);
2330 return ret;
2331
2332error_start_chan:
2333 for (i = i - 1; i >= 0; i--) {
2334 gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_STOP);
2335 gpi_send_cmd(gpii, &gpii->gpii_chan[i], GPI_CH_CMD_RESET);
2336 }
2337 i = 2;
2338error_alloc_chan:
2339 for (i = i - 1; i >= 0; i--)
2340 gpi_reset_chan(&gpii->gpii_chan[i], GPI_CH_CMD_DE_ALLOC);
2341error_alloc_ev_ring:
2342 gpi_disable_interrupts(gpii);
2343error_config_int:
2344 gpi_free_ring(&gpii->ev_ring, gpii);
2345exit_gpi_init:
2346 mutex_unlock(&gpii->ctrl_lock);
2347 return ret;
2348}
2349
2350/* release all channel resources */
2351static void gpi_free_chan_resources(struct dma_chan *chan)
2352{
2353 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2354 struct gpii *gpii = gpii_chan->gpii;
2355 enum gpi_pm_state cur_state;
2356 int ret, i;
2357
2358 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2359
2360 mutex_lock(&gpii->ctrl_lock);
2361
2362 cur_state = gpii_chan->pm_state;
2363
2364 /* disable ch state so no more TRE processing for this channel */
2365 write_lock_irq(&gpii->pm_lock);
2366 gpii_chan->pm_state = PREPARE_TERMINATE;
2367 write_unlock_irq(&gpii->pm_lock);
2368
2369 /* attempt a graceful hardware shutdown */
2370 if (cur_state == ACTIVE_STATE) {
2371 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_STOP);
2372 if (ret)
2373 GPII_ERR(gpii, gpii_chan->chid,
2374 "error stopping channel:%d\n", ret);
2375
2376 ret = gpi_send_cmd(gpii, gpii_chan, GPI_CH_CMD_RESET);
2377 if (ret)
2378 GPII_ERR(gpii, gpii_chan->chid,
2379 "error resetting channel:%d\n", ret);
2380
2381 gpi_reset_chan(gpii_chan, GPI_CH_CMD_DE_ALLOC);
2382 }
2383
2384 /* free all allocated memory */
2385 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2386 gpi_free_ring(&gpii_chan->sg_ring, gpii);
2387 vchan_free_chan_resources(&gpii_chan->vc);
2388
2389 write_lock_irq(&gpii->pm_lock);
2390 gpii_chan->pm_state = DISABLE_STATE;
2391 write_unlock_irq(&gpii->pm_lock);
2392
2393 /* if other rings are still active exit */
2394 for (i = 0; i < MAX_CHANNELS_PER_GPII; i++)
2395 if (gpii->gpii_chan[i].ch_ring.configured)
2396 goto exit_free;
2397
2398 GPII_INFO(gpii, gpii_chan->chid, "disabling gpii\n");
2399
2400 /* deallocate EV Ring */
2401 cur_state = gpii->pm_state;
2402 write_lock_irq(&gpii->pm_lock);
2403 gpii->pm_state = PREPARE_TERMINATE;
2404 write_unlock_irq(&gpii->pm_lock);
2405
2406 /* wait for the event processing tasklet to finish */
2407 tasklet_kill(&gpii->ev_task);
2408
2409 /* send command to de allocate event ring */
2410 if (cur_state == ACTIVE_STATE)
2411 gpi_send_cmd(gpii, NULL, GPI_EV_CMD_DEALLOC);
2412
2413 gpi_free_ring(&gpii->ev_ring, gpii);
2414
2415 /* disable interrupts */
2416 if (cur_state == ACTIVE_STATE)
2417 gpi_disable_interrupts(gpii);
2418
2419 /* set final state to disable */
2420 write_lock_irq(&gpii->pm_lock);
2421 gpii->pm_state = DISABLE_STATE;
2422 write_unlock_irq(&gpii->pm_lock);
2423
2424exit_free:
2425 mutex_unlock(&gpii->ctrl_lock);
2426}
2427
2428/* allocate channel resources */
2429static int gpi_alloc_chan_resources(struct dma_chan *chan)
2430{
2431 struct gpii_chan *gpii_chan = to_gpii_chan(chan);
2432 struct gpii *gpii = gpii_chan->gpii;
2433 int ret;
2434
2435 GPII_INFO(gpii, gpii_chan->chid, "enter\n");
2436
2437 mutex_lock(&gpii->ctrl_lock);
2438
2439 /* allocate memory for transfer ring */
2440 ret = gpi_alloc_ring(&gpii_chan->ch_ring, gpii_chan->req_tres,
2441 sizeof(struct msm_gpi_tre), gpii, true);
2442 if (ret) {
2443 GPII_ERR(gpii, gpii_chan->chid,
2444 "error allocating xfer ring, ret:%d\n", ret);
2445 goto xfer_alloc_err;
2446 }
2447
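 /*
 * The sg ring shadows the transfer ring entry-for-entry: each sg_tre
 * records the client's original TRE pointer next to its copy in the ring
 * (see gpi_queue_xfer), hence the ch_ring.elements sizing below.
 */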
2448 ret = gpi_alloc_ring(&gpii_chan->sg_ring, gpii_chan->ch_ring.elements,
2449 sizeof(struct sg_tre), gpii, false);
2450 if (ret) {
2451 GPII_ERR(gpii, gpii_chan->chid,
2452 "error allocating sg ring, ret:%d\n", ret);
2453 goto sg_alloc_error;
2454 }
2455 mutex_unlock(&gpii->ctrl_lock);
2456
2457 return 0;
2458
2459sg_alloc_error:
2460 gpi_free_ring(&gpii_chan->ch_ring, gpii);
2461xfer_alloc_err:
2462 mutex_unlock(&gpii->ctrl_lock);
2463
2464 return ret;
2465}
2466
2467/* gpi_of_dma_xlate: open client requested channel */
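/*
 * Illustrative client binding (example values, not taken from a schema):
 *
 *	dmas = <&gpi_dma 0 0 1 3 64 0>;
 *
 * args[0..5] map to gpii instance, channel id, se_id, protocol, requested
 * TRE count and priority, matching the parsing below.
 */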
2468static struct dma_chan *gpi_of_dma_xlate(struct of_phandle_args *args,
2469 struct of_dma *of_dma)
2470{
2471 struct gpi_dev *gpi_dev = (struct gpi_dev *)of_dma->of_dma_data;
2472 u32 gpii, chid;
2473 struct gpii_chan *gpii_chan;
2474
2475 if (args->args_count < REQ_OF_DMA_ARGS) {
2476 GPI_ERR(gpi_dev,
2477 "gpii require minimum 6 args, client passed:%d args\n",
2478 args->args_count);
2479 return NULL;
2480 }
2481
2482 /* Check if valid gpii instance */
2483 gpii = args->args[0];
2484 if (!((1 << gpii) & gpi_dev->gpii_mask)) {
2485 GPI_ERR(gpi_dev, "gpii instance:%d is not supported\n", gpii);
2486 return NULL;
2487 }
2488
2489 chid = args->args[1];
2490 if (chid >= MAX_CHANNELS_PER_GPII) {
2491 GPI_ERR(gpi_dev, "gpii channel:%d not valid\n", chid);
2492 return NULL;
2493 }
2494
2495 /* get ring size, protocol, se_id, and priority */
2496 gpii_chan = &gpi_dev->gpiis[gpii].gpii_chan[chid];
2497 gpii_chan->seid = args->args[2];
2498 gpii_chan->protocol = args->args[3];
2499 gpii_chan->req_tres = args->args[4];
2500 gpii_chan->priority = args->args[5];
2501
2502 GPI_LOG(gpi_dev,
2503 "client req. gpii:%u chid:%u #_tre:%u priority:%u protocol:%u\n",
2504 gpii, chid, gpii_chan->req_tres, gpii_chan->priority,
2505 gpii_chan->protocol);
2506
2507 return dma_get_slave_channel(&gpii_chan->vc.chan);
2508}
2509
2510/* gpi_setup_debug - setup debug capabilities */
2511static void gpi_setup_debug(struct gpi_dev *gpi_dev)
2512{
2513 char node_name[GPI_LABEL_SIZE];
2514 const umode_t mode = 0600;
2515 int i;
2516
2517 snprintf(node_name, sizeof(node_name), "%s%llx", GPI_DMA_DRV_NAME,
2518 (u64)gpi_dev->res->start);
2519
2520 gpi_dev->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2521 node_name, 0);
2522 gpi_dev->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2523 if (!IS_ERR_OR_NULL(pdentry)) {
2524 snprintf(node_name, sizeof(node_name), "%llx",
2525 (u64)gpi_dev->res->start);
2526 gpi_dev->dentry = debugfs_create_dir(node_name, pdentry);
2527 if (!IS_ERR_OR_NULL(gpi_dev->dentry)) {
2528 debugfs_create_u32("ipc_log_lvl", mode, gpi_dev->dentry,
2529 &gpi_dev->ipc_log_lvl);
2530 debugfs_create_u32("klog_lvl", mode,
2531 gpi_dev->dentry, &gpi_dev->klog_lvl);
2532 }
2533 }
2534
2535 for (i = 0; i < gpi_dev->max_gpii; i++) {
2536 struct gpii *gpii;
2537
2538 if (!((1 << i) & gpi_dev->gpii_mask))
2539 continue;
2540
2541 gpii = &gpi_dev->gpiis[i];
2542 snprintf(gpii->label, sizeof(gpii->label),
2543 "%s%llx_gpii%d",
2544 GPI_DMA_DRV_NAME, (u64)gpi_dev->res->start, i);
2545 gpii->ilctxt = ipc_log_context_create(IPC_LOG_PAGES,
2546 gpii->label, 0);
2547 gpii->ipc_log_lvl = DEFAULT_IPC_LOG_LVL;
2548 gpii->klog_lvl = DEFAULT_KLOG_LVL;
2549
2550 if (IS_ERR_OR_NULL(gpi_dev->dentry))
2551 continue;
2552
2553 snprintf(node_name, sizeof(node_name), "gpii%d", i);
2554 gpii->dentry = debugfs_create_dir(node_name, gpi_dev->dentry);
2555 if (IS_ERR_OR_NULL(gpii->dentry))
2556 continue;
2557
2558 debugfs_create_u32("ipc_log_lvl", mode, gpii->dentry,
2559 &gpii->ipc_log_lvl);
2560 debugfs_create_u32("klog_lvl", mode, gpii->dentry,
2561 &gpii->klog_lvl);
2562 }
2563}
2564
2565static int gpi_smmu_init(struct gpi_dev *gpi_dev)
2566{
2567 u64 size = U64_MAX;
2568 dma_addr_t base = 0x0;
2569 struct dma_iommu_mapping *map;
2570 int attr, ret;
2571
2572 map = arm_iommu_create_mapping(&platform_bus_type, base, size);
2573 if (IS_ERR_OR_NULL(map)) {
2574 ret = PTR_ERR(map) ? : -EIO;
2575 GPI_ERR(gpi_dev, "error create_mapping, ret:%d\n", ret);
2576 return ret;
2577 }
2578
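 /*
 * DOMAIN_ATTR_ATOMIC is set so mappings can be handled from atomic
 * context, and DOMAIN_ATTR_S1_BYPASS leaves stage-1 translation bypassed
 * for this device.
 */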
2579 attr = 1;
2580 ret = iommu_domain_set_attr(map->domain, DOMAIN_ATTR_ATOMIC, &attr);
2581 if (ret) {
2582 GPI_ERR(gpi_dev, "error setting ATTR_ATOMIC, ret:%d\n", ret);
2583 goto error_smmu;
2584 }
2585
2586 attr = 1;
2587 ret = iommu_domain_set_attr(map->domain, DOMAIN_ATTR_S1_BYPASS, &attr);
2588 if (ret) {
2589 GPI_ERR(gpi_dev, "error setting S1_BYPASS, ret:%d\n", ret);
2590 goto error_smmu;
2591 }
2592
2593 ret = arm_iommu_attach_device(gpi_dev->dev, map);
2594 if (ret) {
2595 GPI_ERR(gpi_dev, "error iommu_attach, ret:%d\n", ret);
2596 goto error_smmu;
2597 }
2598
2599 ret = dma_set_mask(gpi_dev->dev, DMA_BIT_MASK(64));
2600 if (ret) {
2601 GPI_ERR(gpi_dev, "error setting dma_mask, ret:%d\n", ret);
2602 goto error_set_mask;
2603 }
2604
2605 return ret;
2606
2607error_set_mask:
2608 arm_iommu_detach_device(gpi_dev->dev);
2609error_smmu:
2610 arm_iommu_release_mapping(map);
2611 return ret;
2612}
2613
2614static int gpi_probe(struct platform_device *pdev)
2615{
2616 struct gpi_dev *gpi_dev;
2617 int ret, i;
2618
2619 gpi_dev = devm_kzalloc(&pdev->dev, sizeof(*gpi_dev), GFP_KERNEL);
2620 if (!gpi_dev)
2621 return -ENOMEM;
2622
2623 gpi_dev->dev = &pdev->dev;
2624 gpi_dev->klog_lvl = DEFAULT_KLOG_LVL;
2625 gpi_dev->res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
2626 "gpi-top");
2627 if (!gpi_dev->res) {
2628 GPI_ERR(gpi_dev, "missing 'reg' DT node\n");
2629 return -EINVAL;
2630 }
2631 gpi_dev->regs = devm_ioremap_nocache(gpi_dev->dev, gpi_dev->res->start,
2632 resource_size(gpi_dev->res));
2633 if (!gpi_dev->regs) {
2634 GPI_ERR(gpi_dev, "IO remap failed\n");
2635 return -EFAULT;
2636 }
2637
2638 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,max-num-gpii",
2639 &gpi_dev->max_gpii);
2640 if (ret) {
2641 GPI_ERR(gpi_dev, "missing 'qcom,max-num-gpii' DT property\n");
2642 return ret;
2643 }
2644
2645 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,gpii-mask",
2646 &gpi_dev->gpii_mask);
2647 if (ret) {
2648 GPI_ERR(gpi_dev, "missing 'qcom,gpii-mask' DT property\n");
2649 return ret;
2650 }
2651
2652 ret = of_property_read_u32(gpi_dev->dev->of_node, "qcom,ev-factor",
2653 &gpi_dev->ev_factor);
2654 if (ret) {
2655 GPI_ERR(gpi_dev, "missing 'qcom,ev-factor' DT property\n");
2656 return ret;
2657 }
2658
2659 ret = gpi_smmu_init(gpi_dev);
2660 if (ret) {
2661 GPI_ERR(gpi_dev, "error configuring smmu, ret:%d\n", ret);
2662 return ret;
2663 }
2664
2665 gpi_dev->gpiis = devm_kzalloc(gpi_dev->dev,
2666 sizeof(*gpi_dev->gpiis) * gpi_dev->max_gpii,
2667 GFP_KERNEL);
2668 if (!gpi_dev->gpiis)
2669 return -ENOMEM;
2670
2671
2672 /* setup all the supported gpii */
2673 INIT_LIST_HEAD(&gpi_dev->dma_device.channels);
2674 for (i = 0; i < gpi_dev->max_gpii; i++) {
2675 struct gpii *gpii = &gpi_dev->gpiis[i];
2676 int chan;
2677
2678 if (!((1 << i) & gpi_dev->gpii_mask))
2679 continue;
2680
2681 /* set up ev cntxt register map */
2682 gpii->ev_cntxt_base_reg = gpi_dev->regs +
2683 GPI_GPII_n_EV_CH_k_CNTXT_0_OFFS(i, 0);
2684 gpii->ev_cntxt_db_reg = gpi_dev->regs +
2685 GPI_GPII_n_EV_CH_k_DOORBELL_0_OFFS(i, 0);
2686 gpii->ev_ring_base_lsb_reg = gpii->ev_cntxt_base_reg +
2687 CNTXT_2_RING_BASE_LSB;
2688 gpii->ev_ring_rp_lsb_reg = gpii->ev_cntxt_base_reg +
2689 CNTXT_4_RING_RP_LSB;
2690 gpii->ev_ring_wp_lsb_reg = gpii->ev_cntxt_base_reg +
2691 CNTXT_6_RING_WP_LSB;
2692 gpii->ev_cmd_reg = gpi_dev->regs +
2693 GPI_GPII_n_EV_CH_CMD_OFFS(i);
2694 gpii->ieob_src_reg = gpi_dev->regs +
2695 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_OFFS(i);
2696 gpii->ieob_clr_reg = gpi_dev->regs +
2697 GPI_GPII_n_CNTXT_SRC_IEOB_IRQ_CLR_OFFS(i);
2698
2699 /* set up irq */
2700 ret = platform_get_irq(pdev, i);
2701 if (ret < 0) {
2702 GPI_ERR(gpi_dev, "could not request irq for gpii%d ret:%d\n",
2703 i, ret);
2704 return ret;
2705 }
2706 gpii->irq = ret;
2707
2708 /* set up channel specific register info */
2709 for (chan = 0; chan < MAX_CHANNELS_PER_GPII; chan++) {
2710 struct gpii_chan *gpii_chan = &gpii->gpii_chan[chan];
2711
2712 /* set up ch cntxt register map */
2713 gpii_chan->ch_cntxt_base_reg = gpi_dev->regs +
2714 GPI_GPII_n_CH_k_CNTXT_0_OFFS(i, chan);
2715 gpii_chan->ch_cntxt_db_reg = gpi_dev->regs +
2716 GPI_GPII_n_CH_k_DOORBELL_0_OFFS(i, chan);
2717 gpii_chan->ch_ring_base_lsb_reg =
2718 gpii_chan->ch_cntxt_base_reg +
2719 CNTXT_2_RING_BASE_LSB;
2720 gpii_chan->ch_ring_rp_lsb_reg =
2721 gpii_chan->ch_cntxt_base_reg +
2722 CNTXT_4_RING_RP_LSB;
2723 gpii_chan->ch_ring_wp_lsb_reg =
2724 gpii_chan->ch_cntxt_base_reg +
2725 CNTXT_6_RING_WP_LSB;
2726 gpii_chan->ch_cmd_reg = gpi_dev->regs +
2727 GPI_GPII_n_CH_CMD_OFFS(i);
2728
2729 /* vchan setup */
2730 vchan_init(&gpii_chan->vc, &gpi_dev->dma_device);
2731 gpii_chan->vc.desc_free = gpi_desc_free;
2732 gpii_chan->chid = chan;
2733 gpii_chan->gpii = gpii;
2734 gpii_chan->dir = GPII_CHAN_DIR[chan];
2735 }
2736 mutex_init(&gpii->ctrl_lock);
2737 rwlock_init(&gpii->pm_lock);
2738 tasklet_init(&gpii->ev_task, gpi_ev_tasklet,
2739 (unsigned long)gpii);
2740 init_completion(&gpii->cmd_completion);
2741 gpii->gpii_id = i;
2742 gpii->regs = gpi_dev->regs;
2743 gpii->gpi_dev = gpi_dev;
2744 atomic_set(&gpii->dbg_index, 0);
2745 }
2746
2747 platform_set_drvdata(pdev, gpi_dev);
2748
2749 /* clear and Set capabilities */
2750 dma_cap_zero(gpi_dev->dma_device.cap_mask);
2751 dma_cap_set(DMA_SLAVE, gpi_dev->dma_device.cap_mask);
2752
2753 /* configure dmaengine apis */
2754 gpi_dev->dma_device.directions =
2755 BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
2756 gpi_dev->dma_device.residue_granularity =
2757 DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
2758 gpi_dev->dma_device.src_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2759 gpi_dev->dma_device.dst_addr_widths = DMA_SLAVE_BUSWIDTH_8_BYTES;
2760 gpi_dev->dma_device.device_alloc_chan_resources =
2761 gpi_alloc_chan_resources;
2762 gpi_dev->dma_device.device_free_chan_resources =
2763 gpi_free_chan_resources;
2764 gpi_dev->dma_device.device_tx_status = dma_cookie_status;
2765 gpi_dev->dma_device.device_issue_pending = gpi_issue_pending;
2766 gpi_dev->dma_device.device_prep_slave_sg = gpi_prep_slave_sg;
2767 gpi_dev->dma_device.device_config = gpi_config;
2768 gpi_dev->dma_device.device_terminate_all = gpi_terminate_all;
2769 gpi_dev->dma_device.dev = gpi_dev->dev;
2770 gpi_dev->dma_device.device_pause = gpi_pause;
2771 gpi_dev->dma_device.device_resume = gpi_resume;
2772
2773 /* register with dmaengine framework */
2774 ret = dma_async_device_register(&gpi_dev->dma_device);
2775 if (ret) {
2776 GPI_ERR(gpi_dev, "async_device_register failed ret:%d", ret);
2777 return ret;
2778 }
2779
2780 ret = of_dma_controller_register(gpi_dev->dev->of_node,
2781 gpi_of_dma_xlate, gpi_dev);
2782 if (ret) {
2783 GPI_ERR(gpi_dev, "of_dma_controller_reg failed ret:%d", ret);
2784 return ret;
2785 }
2786
2787 /* setup debug capabilities */
2788 gpi_setup_debug(gpi_dev);
2789 GPI_LOG(gpi_dev, "probe success\n");
2790
2791 return ret;
2792}
2793
2794static const struct of_device_id gpi_of_match[] = {
2795 { .compatible = "qcom,gpi-dma" },
2796 {}
2797};
2798MODULE_DEVICE_TABLE(of, gpi_of_match);
2799
2800static struct platform_driver gpi_driver = {
2801 .probe = gpi_probe,
2802 .driver = {
2803 .name = GPI_DMA_DRV_NAME,
2804 .of_match_table = gpi_of_match,
2805 },
2806};
2807
2808static int __init gpi_init(void)
2809{
2810 pdentry = debugfs_create_dir(GPI_DMA_DRV_NAME, NULL);
2811 return platform_driver_register(&gpi_driver);
2812}
2813module_init(gpi_init)
2814
2815MODULE_DESCRIPTION("QCOM GPI DMA engine driver");
2816MODULE_LICENSE("GPL v2");