/* Copyright (c) 2015-2020, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __MHI_H
#define __MHI_H

#include <linux/msm_ep_pcie.h>
#include <linux/ipc_logging.h>
#include <linux/msm_mhi_dev.h>

/**
 * MHI control data structures allotted by the host, including
 * channel context array, event context array, command context and rings.
 */

/* Channel context state */
enum mhi_dev_ch_ctx_state {
	MHI_DEV_CH_STATE_DISABLED,
	MHI_DEV_CH_STATE_ENABLED,
	MHI_DEV_CH_STATE_RUNNING,
	MHI_DEV_CH_STATE_SUSPENDED,
	MHI_DEV_CH_STATE_STOP,
	MHI_DEV_CH_STATE_ERROR,
	MHI_DEV_CH_STATE_RESERVED,
	MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};

/* Channel type */
enum mhi_dev_ch_ctx_type {
	MHI_DEV_CH_TYPE_NONE,
	MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
	MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
	MHI_DEV_CH_RESERVED
};

/* Channel context type */
struct mhi_dev_ch_ctx {
	enum mhi_dev_ch_ctx_state ch_state;
	enum mhi_dev_ch_ctx_type ch_type;
	uint32_t err_indx;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

enum mhi_dev_ring_element_type_id {
	MHI_DEV_RING_EL_INVALID = 0,
	MHI_DEV_RING_EL_NOOP = 1,
	MHI_DEV_RING_EL_TRANSFER = 2,
	MHI_DEV_RING_EL_RESET = 16,
	MHI_DEV_RING_EL_STOP = 17,
	MHI_DEV_RING_EL_START = 18,
	MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
	MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
	MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
	MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
	MHI_DEV_RING_EL_UNDEF
};

enum mhi_dev_ring_state {
	RING_STATE_UINT = 0,
	RING_STATE_IDLE,
	RING_STATE_PENDING,
};

enum mhi_dev_ring_type {
	RING_TYPE_CMD = 0,
	RING_TYPE_ER,
	RING_TYPE_CH,
	RING_TYPE_INVAL
};

/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
	MHI_DEV_EVT_INT_MODERATION_DISABLED
};

/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
	MHI_DEV_EVT_TYPE_DEFAULT,
	MHI_DEV_EVT_TYPE_VALID,
	MHI_DEV_EVT_RESERVED
};

/* Event ring context type */
struct mhi_dev_ev_ctx {
	uint32_t res1:16;
	enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
	enum mhi_dev_evt_ctx_event_ring_type ertype;
	uint32_t msivec;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* Command context */
struct mhi_dev_cmd_ctx {
	uint32_t res1;
	uint32_t res2;
	uint32_t res3;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* generic context */
struct mhi_dev_gen_ctx {
	uint32_t res1;
	uint32_t res2;
	uint32_t res3;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
	uint64_t data_buf_ptr;
	uint32_t len:16;
	uint32_t res1:16;
	uint32_t chain:1;
	uint32_t res2:7;
	uint32_t ieob:1;
	uint32_t ieot:1;
	uint32_t bei:1;
	uint32_t res3:5;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res4:8;
} __packed;

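/*
 * Field notes (summarizing the MHI spec; kept here for reference): chain
 * marks a TRE that is part of a chained transfer, ieob and ieot request an
 * interrupt on end-of-block and end-of-transfer respectively, and bei
 * suppresses the interrupt for the generated block event. For example, a
 * host queuing a single-buffer transfer would typically set ieot = 1 and
 * leave chain = 0.
 */
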
/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
	uint64_t res1;
	uint32_t seqnum;
	uint32_t reliable:1;
	uint32_t res2:15;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

enum mhi_dev_cmd_completion_code {
	MHI_CMD_COMPL_CODE_INVALID = 0,
	MHI_CMD_COMPL_CODE_SUCCESS = 1,
	MHI_CMD_COMPL_CODE_EOT = 2,
	MHI_CMD_COMPL_CODE_OVERFLOW = 3,
	MHI_CMD_COMPL_CODE_EOB = 4,
	MHI_CMD_COMPL_CODE_UNDEFINED = 16,
	MHI_CMD_COMPL_CODE_RING_EL = 17,
	MHI_CMD_COMPL_CODE_RES
};

/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
	uint64_t ptr;
	uint32_t len:16;
	uint32_t res1:8;
	enum mhi_dev_cmd_completion_code code:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_cmd_completion_code code:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

enum mhi_dev_state {
	MHI_DEV_RESET_STATE = 0,
	MHI_DEV_READY_STATE,
	MHI_DEV_M0_STATE,
	MHI_DEV_M1_STATE,
	MHI_DEV_M2_STATE,
	MHI_DEV_M3_STATE,
	MHI_DEV_MAX_STATE,
	MHI_DEV_SYSERR_STATE = 0xff
};

/* MHI state change event */
struct mhi_dev_event_ring_state_change {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_state mhistate:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

enum mhi_dev_execenv {
	MHI_DEV_SBL_EE = 1,
	MHI_DEV_AMSS_EE = 2,
	MHI_DEV_UNRESERVED
};

/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_execenv execenv:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_state mhistate:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

struct mhi_config {
	uint32_t mhi_reg_len;
	uint32_t version;
	uint32_t event_rings;
	uint32_t hw_event_rings;
	uint32_t channels;
	uint32_t chdb_offset;
	uint32_t erdb_offset;
};

#define NUM_CHANNELS			128
#define HW_CHANNEL_BASE			100
#define NUM_HW_CHANNELS			15
#define HW_CHANNEL_END			107
#define MHI_ENV_VALUE			2
#define MHI_MASK_ROWS_CH_EV_DB		4
#define TRB_MAX_DATA_SIZE		8192
#define MHI_CTRL_STATE			100

/* maximum transfer completion events buffer */
#define NUM_TR_EVENTS_DEFAULT		128

/* Set flush threshold to 80% of event buf size */
#define MHI_CMPL_EVT_FLUSH_THRSHLD(n)	(((n) * 8) / 10)
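
/*
 * Worked example: with the default completion event buffer of
 * NUM_TR_EVENTS_DEFAULT (128) events, MHI_CMPL_EVT_FLUSH_THRSHLD(128)
 * evaluates to (128 * 8) / 10 = 102, i.e. pending completion events are
 * flushed to the host once roughly 80% of the buffer is occupied.
 */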

/* Possible ring element types */
union mhi_dev_ring_element_type {
	struct mhi_dev_cmd_ring_op cmd_no_op;
	struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
	struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
	struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
	struct mhi_dev_transfer_ring_element tre;
	struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
	struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
	struct mhi_dev_event_ring_state_change evt_state_change;
	struct mhi_dev_event_ring_ee_state_change evt_ee_state;
	struct mhi_dev_ring_generic generic;
};

/* Ring context type */
union mhi_dev_ring_ctx {
	struct mhi_dev_cmd_ctx cmd;
	struct mhi_dev_ev_ctx ev;
	struct mhi_dev_ch_ctx ch;
	struct mhi_dev_gen_ctx generic;
};

/* MHI host Control and data address region */
struct mhi_host_addr {
	uint32_t ctrl_base_lsb;
	uint32_t ctrl_base_msb;
	uint32_t ctrl_limit_lsb;
	uint32_t ctrl_limit_msb;
	uint32_t data_base_lsb;
	uint32_t data_base_msb;
	uint32_t data_limit_lsb;
	uint32_t data_limit_msb;
};
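
/*
 * Illustrative sketch (assuming the LSB/MSB pairs above hold the low and
 * high 32 bits of 64-bit host addresses): a consumer would typically
 * combine them before programming address windows, e.g.
 *
 *	uint64_t ctrl_base = ((uint64_t)host_addr.ctrl_base_msb << 32) |
 *				host_addr.ctrl_base_lsb;
 */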

/* MHI physical and virtual address region */
struct mhi_meminfo {
	struct device *dev;
	uintptr_t pa_aligned;
	uintptr_t pa_unaligned;
	uintptr_t va_aligned;
	uintptr_t va_unaligned;
	uintptr_t size;
};

struct mhi_addr {
	uint64_t host_pa;
	uintptr_t device_pa;
	uintptr_t device_va;
	size_t size;
	dma_addr_t phy_addr;
	void *virt_addr;
	bool use_ipa_dma;
};

struct mhi_interrupt_state {
	uint32_t mask;
	uint32_t status;
};

enum mhi_dev_channel_state {
	MHI_DEV_CH_UNINT,
	MHI_DEV_CH_STARTED,
	MHI_DEV_CH_PENDING_START,
	MHI_DEV_CH_PENDING_STOP,
	MHI_DEV_CH_STOPPED,
	MHI_DEV_CH_CLOSED,
};

enum mhi_dev_ch_operation {
	MHI_DEV_OPEN_CH,
	MHI_DEV_CLOSE_CH,
	MHI_DEV_READ_CH,
	MHI_DEV_READ_WR,
	MHI_DEV_POLL,
};

enum mhi_dev_tr_compl_evt_type {
	SEND_EVENT_BUFFER,
	SEND_EVENT_RD_OFFSET,
};

enum mhi_dev_transfer_type {
	MHI_DEV_DMA_SYNC,
	MHI_DEV_DMA_ASYNC,
};

struct mhi_dev_channel;

struct mhi_dev_ring {
	struct list_head list;
	struct mhi_dev *mhi_dev;

	uint32_t id;
	uint32_t rd_offset;
	uint32_t wr_offset;
	uint32_t ring_size;

	enum mhi_dev_ring_type type;
	enum mhi_dev_ring_state state;

	/* device virtual address location of the cached host ring ctx data */
	union mhi_dev_ring_element_type *ring_cache;
	/* Physical address of the cached ring copy on the device side */
	dma_addr_t ring_cache_dma_handle;
	/* Physical address of the host where we will write/read to/from */
	struct mhi_addr ring_shadow;
	/* Ring type - cmd, event, transfer ring and its rp/wp... */
	union mhi_dev_ring_ctx *ring_ctx;
	/* ring_ctx_shadow -> tracking ring_ctx in the host */
	union mhi_dev_ring_ctx *ring_ctx_shadow;
	void (*ring_cb)(struct mhi_dev *dev,
			union mhi_dev_ring_element_type *el,
			void *ctx);
};

static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring,
						uint32_t rd_offset)
{
	ring->rd_offset++;
	if (ring->rd_offset == ring->ring_size)
		ring->rd_offset = 0;
}
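
/*
 * Behaviour note with a small worked example: the helper above treats the
 * ring as circular, wrapping the cached read offset back to slot 0 once it
 * reaches ring_size (the rd_offset argument itself is unused; the ring's
 * own member is advanced). With ring_size = 4 and ring->rd_offset = 3:
 *
 *	mhi_dev_ring_inc_index(ring, 0);	// ring->rd_offset is now 0
 */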

/* trace information planned to use for read/write */
#define TRACE_DATA_MAX			128
#define MHI_DEV_DATA_MAX		512

#define MHI_DEV_MMIO_RANGE		0xc80

struct ring_cache_req {
	struct completion *done;
	void *context;
};

struct event_req {
	union mhi_dev_ring_element_type *tr_events;
	/*
	 * Start index of the completion event buffer segment
	 * to be flushed to host
	 */
	u32 start;
	u32 num_events;
	dma_addr_t dma;
	u32 dma_len;
	dma_addr_t event_rd_dma;
	void *context;
	enum mhi_dev_tr_compl_evt_type event_type;
	u32 event_ring;
	void (*client_cb)(void *req);
	struct list_head list;
};

struct mhi_dev_channel {
	struct list_head list;
	struct list_head clients;
	/* synchronization for changing channel state,
	 * adding/removing clients, mhi_dev callbacks, etc
	 */
	struct mhi_dev_ring *ring;

	enum mhi_dev_channel_state state;
	uint32_t ch_id;
	enum mhi_dev_ch_ctx_type ch_type;
	struct mutex ch_lock;
	/* client which the current inbound/outbound message is for */
	struct mhi_dev_client *active_client;
	/* Pointer to completion event buffer */
	union mhi_dev_ring_element_type *tr_events;
	/* Indices for completion event buffer */
	uint32_t evt_buf_rp;
	uint32_t evt_buf_wp;
	uint32_t evt_buf_size;
	/*
	 * Pointer to a block of event request structs used to temporarily
	 * store completion events and meta data before sending them to host
	 */
	struct event_req *ereqs;
	/* Linked list head for event request structs */
	struct list_head event_req_buffers;
	uint32_t evt_req_size;
	/* Linked list head for event request structs to be flushed */
	struct list_head flush_event_req_buffers;
	/* Pointer to the currently used event request struct */
	struct event_req *curr_ereq;

	/* current TRE being processed */
	uint64_t tre_loc;
	/* current TRE size */
	uint32_t tre_size;
	/* tre bytes left to read/write */
	uint32_t tre_bytes_left;
	/* td size being read/written from/to so far */
	uint32_t td_size;
	uint32_t pend_wr_count;
	bool skip_td;
};

/* MHI device structure */
struct mhi_dev {
	struct platform_device *pdev;
	struct device *dev;
	/* MHI MMIO related members */
	phys_addr_t mmio_base_pa_addr;
	void *mmio_base_addr;
	phys_addr_t ipa_uc_mbox_crdb;
	phys_addr_t ipa_uc_mbox_erdb;

	uint32_t *mmio_backup;
	struct mhi_config cfg;
	bool mmio_initialized;

	spinlock_t lock;
	/* Host control base information */
	struct mhi_host_addr host_addr;
	struct mhi_addr ctrl_base;
	struct mhi_addr data_base;
	struct mhi_addr ch_ctx_shadow;
	struct mhi_dev_ch_ctx *ch_ctx_cache;
	dma_addr_t ch_ctx_cache_dma_handle;
	struct mhi_addr ev_ctx_shadow;
	struct mhi_dev_ch_ctx *ev_ctx_cache;
	dma_addr_t ev_ctx_cache_dma_handle;

	struct mhi_addr cmd_ctx_shadow;
	struct mhi_dev_ch_ctx *cmd_ctx_cache;
	dma_addr_t cmd_ctx_cache_dma_handle;
	struct mhi_dev_ring *ring;
	int mhi_irq;
	struct mhi_dev_channel *ch;

	int ctrl_int;
	int cmd_int;
	/* CHDB and EVDB device interrupt state */
	struct mhi_interrupt_state chdb[4];
	struct mhi_interrupt_state evdb[4];

	/* Scheduler work */
	struct work_struct chdb_ctrl_work;

	struct mutex mhi_lock;
	struct mutex mhi_event_lock;

	/* process a ring element */
	struct workqueue_struct *pending_ring_wq;
	struct work_struct pending_work;

	struct list_head event_ring_list;
	struct list_head process_ring_list;

	uint32_t cmd_ring_idx;
	uint32_t ev_ring_start;
	uint32_t ch_ring_start;

	/* IPA Handles */
	u32 ipa_clnt_hndl[NUM_HW_CHANNELS];
	struct workqueue_struct *ring_init_wq;
	struct work_struct ring_init_cb_work;
	struct work_struct re_init;

	/* EP PCIe registration */
	struct workqueue_struct *pcie_event_wq;
	struct ep_pcie_register_event event_reg;
	u32 ifc_id;
	struct ep_pcie_hw *phandle;
	struct work_struct pcie_event;

	atomic_t write_active;
	atomic_t is_suspended;
	atomic_t mhi_dev_wake;
	atomic_t re_init_done;
	struct mutex mhi_write_test;
	u32 device_local_pa_base;
	u32 mhi_ep_msi_num;
	u32 mhi_version;
	void *dma_cache;
	void *read_handle;
	void *write_handle;
	/* Physical scratch buffer for writing control data to the host */
	dma_addr_t cache_dma_handle;
	/*
	 * Physical scratch buffer address used when picking host data
	 * from the host used in mhi_read()
	 */
	dma_addr_t read_dma_handle;
	/*
	 * Physical scratch buffer address used when writing to the host
	 * region from device used in mhi_write()
	 */
	dma_addr_t write_dma_handle;

	/* Use IPA DMA for Software channel data transfer */
	bool use_ipa;

	/* iATU is required to map control and data region */
	bool config_iatu;

	/* Indicates if mhi init is done */
	bool init_done;

	/* MHI state info */
	enum mhi_ctrl_info ctrl_info;

	/* Register for interrupt */
	bool mhi_int;
	bool mhi_int_en;
	/* Registered client callback list */
	struct list_head client_cb_list;

	struct kobj_uevent_env kobj_env;
};

enum mhi_msg_level {
	MHI_MSG_VERBOSE = 0x0,
	MHI_MSG_INFO = 0x1,
	MHI_MSG_DBG = 0x2,
	MHI_MSG_WARNING = 0x3,
	MHI_MSG_ERROR = 0x4,
	MHI_MSG_CRITICAL = 0x5,
	MHI_MSG_reserved = 0x80000000
};

extern uint32_t bhi_imgtxdb;
extern enum mhi_msg_level mhi_msg_lvl;
extern enum mhi_msg_level mhi_ipc_msg_lvl;
extern void *mhi_ipc_log;

#define mhi_log(_msg_lvl, _msg, ...) do { \
	if (_msg_lvl >= mhi_msg_lvl) { \
		pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \
	} \
	if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \
		ipc_log_string(mhi_ipc_log, \
			"[0x%x %s] " _msg, bhi_imgtxdb, __func__, ##__VA_ARGS__); \
	} \
} while (0)
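
/*
 * Usage sketch for the macro above: messages at or above mhi_msg_lvl go to
 * the kernel log, and those at or above mhi_ipc_msg_lvl are mirrored to the
 * IPC log buffer, e.g.
 *
 *	mhi_log(MHI_MSG_ERROR, "ring %d start failed, rc = %d\n", id, rc);
 */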

/* Use ID 0 for legacy /dev/mhi_ctrl. Channel 0 used for internal only */
#define MHI_DEV_UEVENT_CTRL	0

struct mhi_dev_uevent_info {
	enum mhi_client_channel channel;
	enum mhi_ctrl_info ctrl_info;
};

struct mhi_dev_iov {
	void *addr;
	uint32_t buf_size;
};

struct mhi_dev_trace {
	unsigned int timestamp;
	uint32_t data[TRACE_DATA_MAX];
};

/* MHI Ring related functions */

/**
 * mhi_ring_init() - Initializes the ring to its default uninitialized
 *		state. Once a start command is received, the respective ring
 *		is then prepared by fetching the context and updating the
 *		offset.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @type:	Command/Event or Channel transfer ring.
 * @id:		Index to the ring id. For the command ring it is usually 1;
 *		event rings may vary from 1 to 128 and channels from 1 to 256.
 */
void mhi_ring_init(struct mhi_dev_ring *ring,
			enum mhi_dev_ring_type type, int id);

/**
 * mhi_ring_start() - Fetches the respective transfer ring's context from
 *		the host and updates the write offset.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @ctx:	Transfer ring of type mhi_dev_ring_ctx.
 * @mhi:	MHI device structure.
 */
int mhi_ring_start(struct mhi_dev_ring *ring,
			union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi);
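
/*
 * Hypothetical bring-up sketch (illustration only; locking and error
 * handling omitted): on a channel-start command the two calls above would
 * typically be paired, e.g.
 *
 *	struct mhi_dev_ring *ring = &mhi->ring[mhi->ch_ring_start + ch_id];
 *
 *	mhi_ring_init(ring, RING_TYPE_CH, ch_id);
 *	rc = mhi_ring_start(ring,
 *		(union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[ch_id], mhi);
 */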

/**
 * mhi_dev_cache_ring() - Cache the data for the corresponding ring locally.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @wr_offset:	Cache the TREs up to the write offset value.
 */
int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset);

/**
 * mhi_dev_update_wr_offset() - Check for any updates in the write offset.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 */
int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring);

/**
 * mhi_dev_process_ring() - Update the Write pointer, fetch the ring elements
 *		and invoke the client's callback.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 */
int mhi_dev_process_ring(struct mhi_dev_ring *ring);

/**
 * mhi_dev_process_ring_element() - Fetch the ring elements and invoke the
 *		client's callback.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @offset:	Offset index into the respective ring's cache element.
 */
int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);

/**
 * mhi_dev_add_element() - Copy the element to the respective transfer ring's
 *		read pointer and increment the index.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @element:	Transfer ring element to be copied to the host memory.
 * @ereq:	Event request to associate with the element, if any.
 * @evt_offset:	Offset into the associated event buffer.
 */
int mhi_dev_add_element(struct mhi_dev_ring *ring,
			union mhi_dev_ring_element_type *element,
			struct event_req *ereq, int evt_offset);

/**
 * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
 *		from device to the host.
 * @dst_pa:	Physical destination address.
 * @src:	Source virtual address.
 * @len:	Number of bytes to be transferred.
 * @mhi:	MHI dev structure.
 * @req:	mhi_req structure.
 */
int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
		struct mhi_dev *mhi, struct mhi_req *req);

/**
 * mhi_transfer_host_to_device() - memcpy equivalent API to transfer data
 *		from host to the device.
 * @device:	Destination virtual address on the device.
 * @src_pa:	Source physical address on the host.
 * @len:	Number of bytes to be transferred.
 * @mhi:	MHI dev structure.
 * @mreq:	mhi_req structure.
 */
int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
		struct mhi_dev *mhi, struct mhi_req *mreq);

/**
 * mhi_dev_write_to_host() - Transfer data from device to host.
 *		Based on support available, either IPA DMA or memcpy is used.
 * @mhi:	MHI dev structure.
 * @mhi_transfer: Host and device address details for the transfer.
 * @ereq:	Event request for async transfers.
 * @type:	Sync or async transfer of type mhi_dev_transfer_type.
 */
void mhi_dev_write_to_host(struct mhi_dev *mhi, struct mhi_addr *mhi_transfer,
		struct event_req *ereq, enum mhi_dev_transfer_type type);

/**
 * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
 *		from host to device.
 * @mhi:	MHI dev structure.
 * @mhi_transfer: Host and device address details for the transfer.
 */
void mhi_dev_read_from_host(struct mhi_dev *mhi,
		struct mhi_addr *mhi_transfer);

/**
 * mhi_ring_set_cb() - Registers a callback to be invoked for each ring
 *		element processed on the given ring.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @ring_cb:	Callback invoked with the MHI device, the ring element and
 *		a context pointer.
 */
void mhi_ring_set_cb(struct mhi_dev_ring *ring,
			void (*ring_cb)(struct mhi_dev *dev,
			union mhi_dev_ring_element_type *el, void *ctx));

/**
 * mhi_ring_set_state() - Sets internal state of the ring for tracking whether
 *		a ring is being processed, idle or uninitialized.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 * @state:	State of type mhi_dev_ring_state.
 */
void mhi_ring_set_state(struct mhi_dev_ring *ring,
			enum mhi_dev_ring_state state);

/**
 * mhi_ring_get_state() - Obtains the internal state of the ring.
 * @ring:	Ring for the respective context - Channel/Event/Command.
 */
enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring);

/* MMIO related functions */

/**
 * mhi_dev_mmio_read() - Generic MHI MMIO register read API.
 * @dev:	MHI device structure.
 * @offset:	MHI address offset from base.
 * @reg_value:	Pointer to which the read register value is stored.
 */
int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
		uint32_t *reg_value);

/**
 * mhi_dev_mmio_write() - Generic MHI MMIO register write API.
 * @dev:	MHI device structure.
 * @offset:	MHI address offset from base.
 * @val:	Value to be written to the register offset.
 */
int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
		uint32_t val);

/**
 * mhi_dev_mmio_masked_write() - Generic MHI MMIO register write masked API.
 * @dev:	MHI device structure.
 * @offset:	MHI address offset from base.
 * @mask:	Register field mask.
 * @shift:	Register field mask shift value.
 * @val:	Value to be written to the register offset.
 */
int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
				uint32_t mask, uint32_t shift,
				uint32_t val);
/**
 * mhi_dev_mmio_masked_read() - Generic MHI MMIO register read masked API.
 * @dev:	MHI device structure.
 * @offset:	MHI address offset from base.
 * @mask:	Register field mask.
 * @shift:	Register field mask shift value.
 * @reg_val:	Pointer to which the read field value is stored.
 */
int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
				uint32_t mask, uint32_t shift,
				uint32_t *reg_val);
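
/*
 * Illustrative usage (assumed semantics: the masked helpers read-modify-write
 * only the field selected by @mask, with @val shifted left by @shift). For a
 * hypothetical 4-bit field at bits [7:4] of a register at REG_OFFSET:
 *
 *	rc = mhi_dev_mmio_masked_write(dev, REG_OFFSET, 0xF0, 4, 0x3);
 */
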
/**
 * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_disable_ctrl_interrupt() - Disable Control interrupt.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_ctrl_status_interrupt() - Read Control interrupt status.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_cmdb_status_interrupt() - Read Command doorbell status.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_chdb_a7() - Enable Channel doorbell for a given
 *		channel id.
 * @dev:	MHI device structure.
 * @chdb_id:	Channel id number.
 */
int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);

/**
 * mhi_dev_mmio_disable_chdb_a7() - Disable Channel doorbell for a given
 *		channel id.
 * @dev:	MHI device structure.
 * @chdb_id:	Channel id number.
 */
int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);

/**
 * mhi_dev_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given
 *		event ring id.
 * @dev:	MHI device structure.
 * @erdb_id:	Event ring id number.
 */
int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);

/**
 * mhi_dev_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given
 *		event ring id.
 * @dev:	MHI device structure.
 * @erdb_id:	Event ring id number.
 */
int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);

/**
 * mhi_dev_mmio_enable_chdb_interrupts() - Enable all Channel doorbell
 *		interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_chdb_interrupts() - Mask all Channel doorbell
 *		interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_chdb_status_interrupts() - Read all Channel doorbell
 *		interrupt statuses.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_erdb_interrupts() - Enable all Event doorbell
 *		interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_erdb_interrupts() - Mask all Event doorbell
 *		interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_erdb_status_interrupts() - Read all Event doorbell
 *		interrupt statuses.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_interrupts() - Mask all MHI interrupts.
 * @dev:	MHI device structure.
 */
void mhi_dev_mmio_mask_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_chc_base() - Fetch the Channel ring context base address.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_erc_base() - Fetch the Event ring context base address.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_crc_base() - Fetch the Command ring context base address.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_ch_db() - Fetch the Write offset of the Channel ring ID.
 * @ring:	Ring for the respective context - Channel.
 * @wr_offset:	Pointer of the write offset to be written to.
 */
int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_get_erc_db() - Fetch the Write offset of the Event ring ID.
 * @ring:	Ring for the respective context - Event.
 * @wr_offset:	Pointer of the write offset to be written to.
 */
int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_get_cmd_db() - Fetch the Write offset of the Command ring ID.
 * @ring:	Ring for the respective context - Command.
 * @wr_offset:	Pointer of the write offset to be written to.
 */
int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);
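
/*
 * Illustrative usage of the doorbell getters above (hypothetical polling
 * loop body; not part of this API):
 *
 *	uint64_t wr_offset;
 *
 *	rc = mhi_dev_mmio_get_cmd_db(ring, &wr_offset);
 *	if (!rc && wr_offset != ring->wr_offset)
 *		rc = mhi_dev_process_ring(ring);
 */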

/**
 * mhi_dev_mmio_set_env() - Write the Execution Environment.
 * @dev:	MHI device structure.
 * @value:	Value of the Execution Environment.
 */
int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value);

/**
 * mhi_dev_mmio_reset() - Reset the MMIO; done as part of initialization.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_reset(struct mhi_dev *dev);

/**
 * mhi_dev_get_mhi_addr() - Fetches the Data and Control region from the Host.
 * @dev:	MHI device structure.
 */
int mhi_dev_get_mhi_addr(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3.
 * @dev:	MHI device structure.
 * @state:	Pointer of type mhi_dev_state.
 * @mhi_reset:	MHI device reset from host.
 */
int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state,
				u32 *mhi_reset);

/**
 * mhi_dev_mmio_init() - Initializes the MMIO and reads the Number of event
 *		rings, supported number of channels, and offsets to the Channel
 *		and Event doorbell from the host.
 * @dev:	MHI device structure.
 */
int mhi_dev_mmio_init(struct mhi_dev *dev);

/**
 * mhi_dev_update_ner() - Update the number of event rings (NER) programmed by
 *		the host.
 * @dev:	MHI device structure.
 */
int mhi_dev_update_ner(struct mhi_dev *dev);

/**
 * mhi_dev_restore_mmio() - Restores the MMIO when MHI device comes out of M3.
 * @dev:	MHI device structure.
 */
int mhi_dev_restore_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_backup_mmio() - Backup MMIO before an MHI transition to M3.
 * @dev:	MHI device structure.
 */
int mhi_dev_backup_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug.
 * @dev:	MHI device structure.
 */
int mhi_dev_dump_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_config_outbound_iatu() - Configure Outbound Address translation
 *		unit between device and host to map the Data and Control
 *		information.
 * @mhi:	MHI device structure.
 */
int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi);

/**
 * mhi_dev_send_state_change_event() - Send state change event to the host
 *		such as M0/M1/M2/M3.
 * @mhi:	MHI device structure.
 * @state:	MHI state of type mhi_dev_state.
 */
int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
		enum mhi_dev_state state);

/**
 * mhi_dev_send_ee_event() - Send Execution environment state change
 *		event to the host.
 * @mhi:	MHI device structure.
 * @exec_env:	MHI execution environment of type mhi_dev_execenv.
 */
int mhi_dev_send_ee_event(struct mhi_dev *mhi,
		enum mhi_dev_execenv exec_env);

/**
 * mhi_dev_syserr() - System error when unexpected events are received.
 * @mhi:	MHI device structure.
 */
int mhi_dev_syserr(struct mhi_dev *mhi);

/**
 * mhi_dev_suspend() - MHI device suspend to stop channel processing at the
 *		Transfer ring boundary and update the channel state to
 *		suspended.
 * @mhi:	MHI device structure.
 */
int mhi_dev_suspend(struct mhi_dev *mhi);

/**
 * mhi_dev_resume() - MHI device resume to update the channel state to running.
 * @mhi:	MHI device structure.
 */
int mhi_dev_resume(struct mhi_dev *mhi);
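
/*
 * Hypothetical M3-entry sketch (ordering is illustrative; the real
 * sequencing lives in the MHI state machine): a host-initiated M3 would
 * typically pair the helpers above, e.g.
 *
 *	rc = mhi_dev_suspend(mhi);
 *	if (!rc)
 *		rc = mhi_dev_send_state_change_event(mhi, MHI_DEV_M3_STATE);
 */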

/**
 * mhi_dev_trigger_hw_acc_wakeup() - Notify the state machine that there is
 *		hardware-accelerated data to be sent and prevent MHI suspend.
 * @mhi:	MHI device structure.
 */
int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi);

/**
 * mhi_pcie_config_db_routing() - Configure doorbell routing for Event and
 *		Channel context with IPA when performing an MHI resume.
 * @mhi:	MHI device structure.
 */
int mhi_pcie_config_db_routing(struct mhi_dev *mhi);

/**
 * mhi_uci_init() - Initializes the User control interface (UCI) which
 *		exposes device nodes for the supported MHI software
 *		channels.
 */
int mhi_uci_init(void);

/**
 * mhi_dev_net_interface_init() - Initializes the MHI device network interface,
 *		which exposes the virtual network interface (mhi_dev_net0).
 *		Data packets are transferred between the MHI host interface
 *		(mhi_swip) and the mhi_dev_net interface over the software
 *		path.
 */
int mhi_dev_net_interface_init(void);

void mhi_dev_notify_a7_event(struct mhi_dev *mhi);

void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason);

/**
 * mhi_uci_chan_state_notify_all() - Notifies channel state updates to
 *		all clients who have uevents enabled.
 * @mhi:	MHI device structure.
 * @ch_state:	Channel state of type mhi_ctrl_info.
 */
void mhi_uci_chan_state_notify_all(struct mhi_dev *mhi,
		enum mhi_ctrl_info ch_state);

/**
 * mhi_uci_chan_state_notify() - Notifies a channel state update to the client
 *		if uevents are enabled.
 * @mhi:	MHI device structure.
 * @ch_id:	Channel ID of type mhi_client_channel.
 * @ch_state:	Channel state of type mhi_ctrl_info.
 */
void mhi_uci_chan_state_notify(struct mhi_dev *mhi,
		enum mhi_client_channel ch_id, enum mhi_ctrl_info ch_state);

#endif /* __MHI_H */