/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __MHI_H
#define __MHI_H

#include <linux/msm_ep_pcie.h>
#include <linux/types.h>
#include <linux/ipc_logging.h>
#include <linux/dma-mapping.h>

/**
 * MHI control data structures allotted by the host, including
 * channel context array, event context array, command context and rings.
 */

/* Channel context state */
enum mhi_dev_ch_ctx_state {
	MHI_DEV_CH_STATE_DISABLED,
	MHI_DEV_CH_STATE_ENABLED,
	MHI_DEV_CH_STATE_RUNNING,
	MHI_DEV_CH_STATE_SUSPENDED,
	MHI_DEV_CH_STATE_STOP,
	MHI_DEV_CH_STATE_ERROR,
	MHI_DEV_CH_STATE_RESERVED,
	MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};

/* Channel type */
enum mhi_dev_ch_ctx_type {
	MHI_DEV_CH_TYPE_NONE,
	MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
	MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
	MHI_DEV_CH_RESERVED
};

/* Channel context type */
struct mhi_dev_ch_ctx {
	enum mhi_dev_ch_ctx_state ch_state;
	enum mhi_dev_ch_ctx_type ch_type;
	uint32_t err_indx;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

enum mhi_dev_ring_element_type_id {
	MHI_DEV_RING_EL_INVALID = 0,
	MHI_DEV_RING_EL_NOOP = 1,
	MHI_DEV_RING_EL_TRANSFER = 2,
	MHI_DEV_RING_EL_RESET = 16,
	MHI_DEV_RING_EL_STOP = 17,
	MHI_DEV_RING_EL_START = 18,
	MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
	MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
	MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
	MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
	MHI_DEV_RING_EL_UNDEF
};

enum mhi_dev_ring_state {
	RING_STATE_UINT = 0,
	RING_STATE_IDLE,
	RING_STATE_PENDING,
};

enum mhi_dev_ring_type {
	RING_TYPE_CMD = 0,
	RING_TYPE_ER,
	RING_TYPE_CH,
	RING_TYPE_INVAL
};

/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
	MHI_DEV_EVT_INT_MODERATION_DISABLED
};

/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
	MHI_DEV_EVT_TYPE_DEFAULT,
	MHI_DEV_EVT_TYPE_VALID,
	MHI_DEV_EVT_RESERVED
};

/* Event ring context type */
struct mhi_dev_ev_ctx {
	uint32_t res1:16;
	enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
	enum mhi_dev_evt_ctx_event_ring_type ertype;
	uint32_t msivec;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* Command context */
struct mhi_dev_cmd_ctx {
	uint32_t res1;
	uint32_t res2;
	uint32_t res3;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* Generic context */
struct mhi_dev_gen_ctx {
	uint32_t res1;
	uint32_t res2;
	uint32_t res3;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
	uint64_t data_buf_ptr;
	uint32_t len:16;
	uint32_t res1:16;
	uint32_t chain:1;
	uint32_t res2:7;
	uint32_t ieob:1;
	uint32_t ieot:1;
	uint32_t bei:1;
	uint32_t res3:5;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res4:8;
} __packed;

/* Command ring elements */

/* Command ring no-op command */
struct mhi_dev_cmd_ring_op {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
	uint64_t res1;
	uint32_t seqnum;
	uint32_t reliable:1;
	uint32_t res2:15;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

enum mhi_dev_cmd_completion_code {
	MHI_CMD_COMPL_CODE_INVALID = 0,
	MHI_CMD_COMPL_CODE_SUCCESS = 1,
	MHI_CMD_COMPL_CODE_EOT = 2,
	MHI_CMD_COMPL_CODE_OVERFLOW = 3,
	MHI_CMD_COMPL_CODE_EOB = 4,
	MHI_CMD_COMPL_CODE_UNDEFINED = 16,
	MHI_CMD_COMPL_CODE_RING_EL = 17,
	MHI_CMD_COMPL_CODE_RES
};

/* Event ring elements */

/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
	uint64_t ptr;
	uint32_t len:16;
	uint32_t res1:8;
	enum mhi_dev_cmd_completion_code code:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_cmd_completion_code code:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

enum mhi_dev_state {
	MHI_DEV_RESET_STATE = 0,
	MHI_DEV_READY_STATE,
	MHI_DEV_M0_STATE,
	MHI_DEV_M1_STATE,
	MHI_DEV_M2_STATE,
	MHI_DEV_M3_STATE,
	MHI_DEV_MAX_STATE,
	MHI_DEV_SYSERR_STATE = 0xff
};

/* MHI state change event */
struct mhi_dev_event_ring_state_change {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_state mhistate:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

enum mhi_dev_execenv {
	MHI_DEV_SBL_EE = 1,
	MHI_DEV_AMSS_EE = 2,
	MHI_DEV_UNRESERVED
};

/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_execenv execenv:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_state mhistate:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

struct mhi_config {
	uint32_t mhi_reg_len;
	uint32_t version;
	uint32_t event_rings;
	uint32_t channels;
	uint32_t chdb_offset;
	uint32_t erdb_offset;
};

#define NUM_CHANNELS			128
#define HW_CHANNEL_BASE			100
#define HW_CHANNEL_END			107
#define MHI_ENV_VALUE			2
#define MHI_MASK_ROWS_CH_EV_DB		4
#define TRB_MAX_DATA_SIZE		4096
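
/*
 * Illustrative sketch (not part of the driver API; the helper name is
 * hypothetical): with the defines above, a channel id is
 * hardware-accelerated when it falls within
 * [HW_CHANNEL_BASE, HW_CHANNEL_END]:
 *
 *	static inline bool mhi_dev_is_hw_channel(uint32_t ch_id)
 *	{
 *		return ch_id >= HW_CHANNEL_BASE && ch_id <= HW_CHANNEL_END;
 *	}
 */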

/* Possible ring element types */
union mhi_dev_ring_element_type {
	struct mhi_dev_cmd_ring_op cmd_no_op;
	struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
	struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
	struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
	struct mhi_dev_transfer_ring_element tre;
	struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
	struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
	struct mhi_dev_event_ring_state_change evt_state_change;
	struct mhi_dev_event_ring_ee_state_change evt_ee_state;
	struct mhi_dev_ring_generic generic;
};
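
/*
 * Illustrative sketch (assumption, not driver code): each element layout
 * carries its type field in the same position, so the generic view can be
 * used to dispatch before reading the specific member; start_channel() and
 * process_tre() are hypothetical:
 *
 *	switch (el->generic.type) {
 *	case MHI_DEV_RING_EL_START:
 *		start_channel(el->cmd_start.chid);
 *		break;
 *	case MHI_DEV_RING_EL_TRANSFER:
 *		process_tre(&el->tre);
 *		break;
 *	default:
 *		break;
 *	}
 */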

/* Possible ring context types */
union mhi_dev_ring_ctx {
	struct mhi_dev_cmd_ctx cmd;
	struct mhi_dev_ev_ctx ev;
	struct mhi_dev_ch_ctx ch;
	struct mhi_dev_gen_ctx generic;
};

/* MHI host control and data address region */
struct mhi_host_addr {
	uint32_t ctrl_base_lsb;
	uint32_t ctrl_base_msb;
	uint32_t ctrl_limit_lsb;
	uint32_t ctrl_limit_msb;
	uint32_t data_base_lsb;
	uint32_t data_base_msb;
	uint32_t data_limit_lsb;
	uint32_t data_limit_msb;
};
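
/*
 * Illustrative sketch (assumption, not driver code): the host publishes
 * 64-bit addresses split across LSB/MSB register pairs, which can be
 * recombined as:
 *
 *	uint64_t ctrl_base = ((uint64_t)host_addr.ctrl_base_msb << 32) |
 *			host_addr.ctrl_base_lsb;
 */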

/* MHI physical and virtual address region */
struct mhi_meminfo {
	struct device *dev;
	uintptr_t pa_aligned;
	uintptr_t pa_unaligned;
	uintptr_t va_aligned;
	uintptr_t va_unaligned;
	uintptr_t size;
};

struct mhi_addr {
	uint64_t host_pa;
	uintptr_t device_pa;
	uintptr_t device_va;
	uint32_t size;
};

struct mhi_interrupt_state {
	uint32_t mask;
	uint32_t status;
};

enum mhi_dev_channel_state {
	MHI_DEV_CH_UNINT,
	MHI_DEV_CH_STARTED,
	MHI_DEV_CH_PENDING_START,
	MHI_DEV_CH_PENDING_STOP,
	MHI_DEV_CH_STOPPED,
	MHI_DEV_CH_CLOSED,
};

enum mhi_dev_ch_operation {
	MHI_DEV_OPEN_CH,
	MHI_DEV_CLOSE_CH,
	MHI_DEV_READ_CH,
	MHI_DEV_READ_WR,
	MHI_DEV_POLL,
};

struct mhi_dev_channel;

struct mhi_dev_ring {
	struct list_head list;
	struct mhi_dev *mhi_dev;

	uint32_t id;
	uint32_t rd_offset;
	uint32_t wr_offset;
	uint32_t ring_size;

	enum mhi_dev_ring_type type;
	enum mhi_dev_ring_state state;

	/* device virtual address location of the cached host ring ctx data */
	union mhi_dev_ring_element_type *ring_cache;
	/* Physical address of the cached ring copy on the device side */
	dma_addr_t ring_cache_dma_handle;
	/* Physical address of the host where we will write/read to/from */
	struct mhi_addr ring_shadow;
	/* Ring context - cmd, event or transfer ring, with its rp/wp */
	union mhi_dev_ring_ctx *ring_ctx;
	/* ring_ctx_shadow -> tracking ring_ctx in the host */
	union mhi_dev_ring_ctx *ring_ctx_shadow;
	void (*ring_cb)(struct mhi_dev *dev,
			union mhi_dev_ring_element_type *el,
			void *ctx);
};

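/*
 * Advance the ring's read offset by one element, wrapping back to the
 * start of the ring when the end is reached. Note the rd_offset argument
 * is currently unused; the offset tracked inside the ring is updated.
 */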
static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring,
						uint32_t rd_offset)
{
	ring->rd_offset++;
	if (ring->rd_offset == ring->ring_size)
		ring->rd_offset = 0;
}

/* Trace information planned to be used for read/write */
#define TRACE_DATA_MAX			128
#define MHI_DEV_DATA_MAX		512

#define MHI_DEV_MMIO_RANGE		0xc80

enum cb_reason {
	MHI_DEV_TRE_AVAILABLE = 0,
};

struct mhi_dev_client_cb_reason {
	uint32_t ch_id;
	enum cb_reason reason;
};

struct mhi_dev_client {
	struct list_head list;
	struct mhi_dev_channel *channel;
	void (*event_trigger)(struct mhi_dev_client_cb_reason *cb);

	/* mhi_dev calls are fully synchronous -- only one call may be
	 * active per client at a time for now.
	 */
	struct mutex write_lock;
	wait_queue_head_t wait;

	/* trace logs */
	spinlock_t tr_lock;
	unsigned int tr_head;
	unsigned int tr_tail;
	struct mhi_dev_trace *tr_log;

	/* client buffers */
	struct mhi_dev_iov *iov;
	uint32_t nr_iov;
};

struct mhi_dev_channel {
	struct list_head list;
	struct list_head clients;
	/* synchronization for changing channel state,
	 * adding/removing clients, mhi_dev callbacks, etc.
	 */
	spinlock_t lock;

	struct mhi_dev_ring *ring;

	enum mhi_dev_channel_state state;
	uint32_t ch_id;
	enum mhi_dev_ch_ctx_type ch_type;
	struct mutex ch_lock;
	/* client which the current inbound/outbound message is for */
	struct mhi_dev_client *active_client;

	/* current TRE being processed */
	uint64_t tre_loc;
	/* current TRE size */
	uint32_t tre_size;
	/* TRE bytes left to read/write */
	uint32_t tre_bytes_left;
	/* TD size read/written from/to so far */
	uint32_t td_size;
	bool wr_request_active;
	bool skip_td;
};

/* MHI device structure */
struct mhi_dev {
	struct platform_device *pdev;
	struct device *dev;
	/* MHI MMIO related members */
	phys_addr_t mmio_base_pa_addr;
	void *mmio_base_addr;
	phys_addr_t ipa_uc_mbox_crdb;
	phys_addr_t ipa_uc_mbox_erdb;

	uint32_t *mmio_backup;
	struct mhi_config cfg;
	bool mmio_initialized;

	/* Host control base information */
	struct mhi_host_addr host_addr;
	struct mhi_addr ctrl_base;
	struct mhi_addr data_base;
	struct mhi_addr ch_ctx_shadow;
	struct mhi_dev_ch_ctx *ch_ctx_cache;
	dma_addr_t ch_ctx_cache_dma_handle;
	struct mhi_addr ev_ctx_shadow;
	struct mhi_dev_ch_ctx *ev_ctx_cache;
	dma_addr_t ev_ctx_cache_dma_handle;

	struct mhi_addr cmd_ctx_shadow;
	struct mhi_dev_ch_ctx *cmd_ctx_cache;
	dma_addr_t cmd_ctx_cache_dma_handle;
	struct mhi_dev_ring *ring;
	struct mhi_dev_channel *ch;

	int ctrl_int;
	int cmd_int;
	/* CHDB and EVDB device interrupt state */
	struct mhi_interrupt_state chdb[4];
	struct mhi_interrupt_state evdb[4];

	/* Scheduler work */
	struct work_struct chdb_ctrl_work;
	struct mutex mhi_lock;
	struct mutex mhi_event_lock;

	/* process a ring element */
	struct workqueue_struct *pending_ring_wq;
	struct work_struct pending_work;

	struct list_head event_ring_list;
	struct list_head process_ring_list;

	uint32_t cmd_ring_idx;
	uint32_t ev_ring_start;
	uint32_t ch_ring_start;

	/* IPA handles */
	u32 ipa_clnt_hndl[4];
	struct workqueue_struct *ring_init_wq;
	struct work_struct ring_init_cb_work;

	/* EP PCIe registration */
	struct ep_pcie_register_event event_reg;
	u32 ifc_id;
	struct ep_pcie_hw *phandle;

	atomic_t write_active;
	atomic_t is_suspended;
	struct mutex mhi_write_test;
	u32 mhi_ep_msi_num;
	u32 mhi_version;
	void *dma_cache;
	void *read_handle;
	void *write_handle;
	/* Physical scratch buffer for writing control data to the host */
	dma_addr_t cache_dma_handle;
	/*
	 * Physical scratch buffer address used when reading host data
	 * into the device in mhi_read()
	 */
	dma_addr_t read_dma_handle;
	/*
	 * Physical scratch buffer address used when writing to the host
	 * region from the device in mhi_write()
	 */
	dma_addr_t write_dma_handle;
};

enum mhi_msg_level {
	MHI_MSG_VERBOSE = 0x0,
	MHI_MSG_INFO = 0x1,
	MHI_MSG_DBG = 0x2,
	MHI_MSG_WARNING = 0x3,
	MHI_MSG_ERROR = 0x4,
	MHI_MSG_CRITICAL = 0x5,
	MHI_MSG_reserved = 0x80000000
};

extern enum mhi_msg_level mhi_msg_lvl;
extern enum mhi_msg_level mhi_ipc_msg_lvl;
extern void *mhi_ipc_log;

#define mhi_log(_msg_lvl, _msg, ...) do { \
	if (_msg_lvl >= mhi_msg_lvl) { \
		pr_err("[%s] " _msg, __func__, ##__VA_ARGS__); \
	} \
	if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \
		ipc_log_string(mhi_ipc_log, \
			"[%s] " _msg, __func__, ##__VA_ARGS__); \
	} \
} while (0)
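
/*
 * Illustrative usage sketch (not part of the API): messages at or above
 * mhi_msg_lvl go to the kernel log and, independently, to the IPC log
 * once mhi_ipc_log is set up:
 *
 *	mhi_log(MHI_MSG_ERROR, "ring %d start failed, rc = %d\n", id, rc);
 */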

/* SW channel client list */
enum mhi_client_channel {
	MHI_CLIENT_LOOPBACK_OUT = 0,
	MHI_CLIENT_LOOPBACK_IN = 1,
	MHI_CLIENT_SAHARA_OUT = 2,
	MHI_CLIENT_SAHARA_IN = 3,
	MHI_CLIENT_DIAG_OUT = 4,
	MHI_CLIENT_DIAG_IN = 5,
	MHI_CLIENT_SSR_OUT = 6,
	MHI_CLIENT_SSR_IN = 7,
	MHI_CLIENT_QDSS_OUT = 8,
	MHI_CLIENT_QDSS_IN = 9,
	MHI_CLIENT_EFS_OUT = 10,
	MHI_CLIENT_EFS_IN = 11,
	MHI_CLIENT_MBIM_OUT = 12,
	MHI_CLIENT_MBIM_IN = 13,
	MHI_CLIENT_QMI_OUT = 14,
	MHI_CLIENT_QMI_IN = 15,
	MHI_CLIENT_IP_CTRL_0_OUT = 16,
	MHI_CLIENT_IP_CTRL_0_IN = 17,
	MHI_CLIENT_IP_CTRL_1_OUT = 18,
	MHI_CLIENT_IP_CTRL_1_IN = 19,
	MHI_CLIENT_DCI_OUT = 20,
	MHI_CLIENT_DCI_IN = 21,
	MHI_CLIENT_IP_CTRL_3_OUT = 22,
	MHI_CLIENT_IP_CTRL_3_IN = 23,
	MHI_CLIENT_IP_CTRL_4_OUT = 24,
	MHI_CLIENT_IP_CTRL_4_IN = 25,
	MHI_CLIENT_IP_CTRL_5_OUT = 26,
	MHI_CLIENT_IP_CTRL_5_IN = 27,
	MHI_CLIENT_IP_CTRL_6_OUT = 28,
	MHI_CLIENT_IP_CTRL_6_IN = 29,
	MHI_CLIENT_IP_CTRL_7_OUT = 30,
	MHI_CLIENT_IP_CTRL_7_IN = 31,
	MHI_CLIENT_DUN_OUT = 32,
	MHI_CLIENT_DUN_IN = 33,
	MHI_CLIENT_IP_SW_0_OUT = 34,
	MHI_CLIENT_IP_SW_0_IN = 35,
	MHI_CLIENT_IP_SW_1_OUT = 36,
	MHI_CLIENT_IP_SW_1_IN = 37,
	MHI_CLIENT_IP_SW_2_OUT = 38,
	MHI_CLIENT_IP_SW_2_IN = 39,
	MHI_CLIENT_IP_SW_3_OUT = 40,
	MHI_CLIENT_IP_SW_3_IN = 41,
	MHI_CLIENT_CSVT_OUT = 42,
	MHI_CLIENT_CSVT_IN = 43,
	MHI_CLIENT_SMCT_OUT = 44,
	MHI_CLIENT_SMCT_IN = 45,
	MHI_MAX_SOFTWARE_CHANNELS = 46,
	MHI_CLIENT_TEST_OUT = 60,
	MHI_CLIENT_TEST_IN = 61,
	MHI_CLIENT_RESERVED_1_LOWER = 62,
	MHI_CLIENT_RESERVED_1_UPPER = 99,
	MHI_CLIENT_IP_HW_0_OUT = 100,
	MHI_CLIENT_IP_HW_0_IN = 101,
	MHI_CLIENT_RESERVED_2_LOWER = 102,
	MHI_CLIENT_RESERVED_2_UPPER = 127,
	MHI_MAX_CHANNELS = 102,
};
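
/*
 * Note (an observation from the list above, not normative): channels are
 * enumerated in OUT/IN pairs, even ids being outbound (host to device)
 * and odd ids inbound (device to host), so the direction of a paired
 * channel id can be derived as:
 *
 *	bool is_inbound = (ch_id & 1) != 0;
 */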

struct mhi_dev_iov {
	void *addr;
	uint32_t buf_size;
};

/**
 * mhi_dev_open_channel() - Opens a channel for a given client, done prior
 *		to read/write.
 * @chan_id: Software channel ID for the assigned client.
 * @handle_client: Pointer the client handle is stored to.
 * @event_trigger: Callback invoked to notify the client of channel events.
 */
int mhi_dev_open_channel(uint32_t chan_id,
		struct mhi_dev_client **handle_client,
		void (*event_trigger)(struct mhi_dev_client_cb_reason *cb));
/**
 * mhi_dev_close_channel() - Closes the channel for a given client.
 */
int mhi_dev_close_channel(struct mhi_dev_client *handle_client);

/**
 * mhi_dev_read_channel() - Channel read for a given client.
 * @handle_client: Client handle issued during mhi_dev_open_channel.
 * @buf: Pointer to the buffer into which the MHI core copies the data
 *		received from the host.
 * @buf_size: Size of the buffer.
 * @chain: Indicates whether the received data is part of a chained packet.
 */
int mhi_dev_read_channel(struct mhi_dev_client *handle_client,
		void *buf, uint32_t buf_size, uint32_t *chain);

/**
 * mhi_dev_write_channel() - Channel write for a given software client.
 * @handle_client: Client handle issued during mhi_dev_open_channel.
 * @buf: Pointer to the buffer from which the MHI core copies the data
 *		from the device to the host.
 * @buf_size: Size of the buffer.
 */
int mhi_dev_write_channel(struct mhi_dev_client *handle_client, void *buf,
		uint32_t buf_size);

/**
 * mhi_dev_channel_isempty() - Checks if there are any pending TREs to
 *		process.
 * @handle: Client handle issued during mhi_dev_open_channel.
 */
int mhi_dev_channel_isempty(struct mhi_dev_client *handle);
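
/*
 * Illustrative client flow sketch (assumption, not driver code); the
 * callback and return-value handling are hypothetical:
 *
 *	static void loopback_cb(struct mhi_dev_client_cb_reason *reason)
 *	{
 *		if (reason->reason == MHI_DEV_TRE_AVAILABLE)
 *			pr_debug("ch %d has TREs pending\n", reason->ch_id);
 *	}
 *
 *	struct mhi_dev_client *client;
 *	uint32_t chain;
 *	char buf[TRB_MAX_DATA_SIZE];
 *
 *	if (!mhi_dev_open_channel(MHI_CLIENT_LOOPBACK_OUT, &client,
 *			loopback_cb)) {
 *		if (!mhi_dev_channel_isempty(client))
 *			mhi_dev_read_channel(client, buf, sizeof(buf),
 *					&chain);
 *		mhi_dev_close_channel(client);
 *	}
 */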

struct mhi_dev_trace {
	unsigned int timestamp;
	uint32_t data[TRACE_DATA_MAX];
};

/* MHI Ring related functions */

/**
 * mhi_ring_init() - Initializes the ring to its default, uninitialized
 *		state. Once a start command is received, the respective ring
 *		is prepared by fetching the context and updating the offset.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @type: Command/Event or Channel transfer ring.
 * @id: Index of the ring. For the command ring it is usually 1; event
 *		rings may vary from 1 to 128 and channel rings from 1 to 256.
 */
void mhi_ring_init(struct mhi_dev_ring *ring,
		enum mhi_dev_ring_type type, int id);

/**
 * mhi_ring_start() - Fetches the respective ring's context from the host
 *		and updates the write offset.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @ctx: Ring context of type mhi_dev_ring_ctx.
 * @mhi: MHI device structure.
 */
int mhi_ring_start(struct mhi_dev_ring *ring,
		union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi);
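
/*
 * Illustrative bring-up sketch (assumption, not driver code): on a start
 * command for channel chid, the corresponding ring is initialized and
 * started against the cached channel context:
 *
 *	struct mhi_dev_ring *ring = &mhi->ring[mhi->ch_ring_start + chid];
 *
 *	mhi_ring_init(ring, RING_TYPE_CH, chid);
 *	if (mhi_ring_start(ring,
 *			(union mhi_dev_ring_ctx *)&mhi->ch_ctx_cache[chid],
 *			mhi))
 *		mhi_log(MHI_MSG_ERROR, "start failed for ch %d\n", chid);
 */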

/**
 * mhi_dev_cache_ring() - Caches the data for the corresponding ring locally.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @wr_offset: Cache the TREs up to the write offset value.
 */
int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset);

/**
 * mhi_dev_update_wr_offset() - Check for any updates in the write offset.
 * @ring: Ring for the respective context - Channel/Event/Command.
 */
int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring);

/**
 * mhi_dev_process_ring() - Update the write pointer, fetch the ring elements
 *		and invoke the client's callback.
 * @ring: Ring for the respective context - Channel/Event/Command.
 */
int mhi_dev_process_ring(struct mhi_dev_ring *ring);

/**
 * mhi_dev_process_ring_element() - Fetch the ring element at the given
 *		offset and invoke the client's callback.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @offset: Offset index into the respective ring's cache element.
 */
int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);

/**
 * mhi_dev_add_element() - Copy the element to the respective transfer ring's
 *		read pointer and increment the index.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @element: Transfer ring element to be copied to the host memory.
 */
int mhi_dev_add_element(struct mhi_dev_ring *ring,
		union mhi_dev_ring_element_type *element);

/**
 * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
 *		from device to the host.
 * @dst_pa: Physical destination address.
 * @src: Source virtual address.
 * @len: Number of bytes to be transferred.
 * @mhi: MHI device structure.
 */
int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
		struct mhi_dev *mhi);

/**
 * mhi_transfer_host_to_device() - memcpy equivalent API to transfer data
 *		from host to the device.
 * @device: Destination virtual address on the device.
 * @src_pa: Source physical address on the host.
 * @len: Number of bytes to be transferred.
 * @mhi: MHI device structure.
 */
int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
		struct mhi_dev *mhi);
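
/*
 * Illustrative sketch (assumption, not driver code): servicing one
 * outbound TRE pulls the payload described by the TRE from host memory
 * into a local scratch buffer:
 *
 *	struct mhi_dev_transfer_ring_element *tre = &el->tre;
 *
 *	if (mhi_transfer_host_to_device(mhi->read_handle,
 *			tre->data_buf_ptr, tre->len, mhi))
 *		mhi_log(MHI_MSG_ERROR, "host to device copy failed\n");
 */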

/**
 * mhi_dev_write_to_host() - memcpy equivalent API to transfer data
 *		from device to host.
 * @host: Host and device address details.
 * @buf: Data buffer that needs to be written to the host.
 * @size: Data buffer size.
 * @mhi: MHI device structure.
 */
void mhi_dev_write_to_host(struct mhi_addr *host, void *buf, size_t size,
		struct mhi_dev *mhi);

/**
 * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
 *		from host to device.
 * @dst: Host and device address details.
 * @buf: Physical address of the buffer the host data is read into.
 * @size: Data buffer size.
 */
void mhi_dev_read_from_host(struct mhi_addr *dst, dma_addr_t buf, size_t size);

/**
 * mhi_ring_set_cb() - Sets the callback invoked while processing elements
 *		of the given ring.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @ring_cb: Callback to be invoked per ring element.
 */
void mhi_ring_set_cb(struct mhi_dev_ring *ring,
		void (*ring_cb)(struct mhi_dev *dev,
		union mhi_dev_ring_element_type *el, void *ctx));

/**
 * mhi_ring_set_state() - Sets the internal state of the ring for tracking
 *		whether the ring is being processed, idle or uninitialized.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @state: State of type mhi_dev_ring_state.
 */
void mhi_ring_set_state(struct mhi_dev_ring *ring,
		enum mhi_dev_ring_state state);

/**
 * mhi_ring_get_state() - Obtains the internal state of the ring.
 * @ring: Ring for the respective context - Channel/Event/Command.
 */
enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring);

/* MMIO related functions */

/**
 * mhi_dev_mmio_read() - Generic MHI MMIO register read API.
 * @dev: MHI device structure.
 * @offset: MHI address offset from base.
 * @reg_value: Pointer the register value is stored to.
 */
int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
		uint32_t *reg_value);

/**
 * mhi_dev_mmio_write() - Generic MHI MMIO register write API.
 * @dev: MHI device structure.
 * @offset: MHI address offset from base.
 * @val: Value to be written to the register offset.
 */
int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
		uint32_t val);

/**
 * mhi_dev_mmio_masked_write() - Generic MHI MMIO register masked write API.
 * @dev: MHI device structure.
 * @offset: MHI address offset from base.
 * @mask: Register field mask.
 * @shift: Register field mask shift value.
 * @val: Value to be written to the register offset.
 */
int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
		uint32_t mask, uint32_t shift,
		uint32_t val);

/**
 * mhi_dev_mmio_masked_read() - Generic MHI MMIO register masked read API.
 * @dev: MHI device structure.
 * @offset: MHI address offset from base.
 * @mask: Register field mask.
 * @shift: Register field mask shift value.
 * @reg_val: Pointer the register value is stored to.
 */
int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
		uint32_t mask, uint32_t shift,
		uint32_t *reg_val);
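
/*
 * Illustrative sketch (assumption, not driver code): a masked write
 * updates a single register field in place, e.g. writing 2 into the
 * field covered by mask 0xFF00 at shift 8 of a hypothetical offset:
 *
 *	if (mhi_dev_mmio_masked_write(dev, offset, 0xFF00, 8, 2))
 *		mhi_log(MHI_MSG_ERROR, "masked write failed\n");
 */
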
/**
 * mhi_dev_mmio_enable_ctrl_interrupt() - Enable control interrupt.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_disable_ctrl_interrupt() - Disable control interrupt.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_ctrl_status_interrupt() - Read control interrupt status.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_cmdb_interrupt() - Enable command doorbell interrupt.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_disable_cmdb_interrupt() - Disable command doorbell interrupt.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_cmdb_status_interrupt() - Read command doorbell status.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_chdb_a7() - Enable the channel doorbell for a given
 *		channel id.
 * @dev: MHI device structure.
 * @chdb_id: Channel id number.
 */
int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);

/**
 * mhi_dev_mmio_disable_chdb_a7() - Disable the channel doorbell for a given
 *		channel id.
 * @dev: MHI device structure.
 * @chdb_id: Channel id number.
 */
int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);

/**
 * mhi_dev_mmio_enable_erdb_a7() - Enable the event ring doorbell for a given
 *		event ring id.
 * @dev: MHI device structure.
 * @erdb_id: Event ring id number.
 */
int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);

/**
 * mhi_dev_mmio_disable_erdb_a7() - Disable the event ring doorbell for a
 *		given event ring id.
 * @dev: MHI device structure.
 * @erdb_id: Event ring id number.
 */
int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);

/**
 * mhi_dev_mmio_enable_chdb_interrupts() - Enable all channel doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_chdb_interrupts() - Mask all channel doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_chdb_status_interrupts() - Read all channel doorbell
 *		interrupt statuses.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_erdb_interrupts() - Enable all event doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_erdb_interrupts() - Mask all event doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_erdb_status_interrupts() - Read all event doorbell
 *		interrupt statuses.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_chc_base() - Fetch the channel ring context base address.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_erc_base() - Fetch the event ring context base address.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_crc_base() - Fetch the command ring context base address.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_ch_db() - Fetch the write offset of the given channel
 *		ring.
 * @ring: Channel ring.
 * @wr_offset: Pointer the write offset is stored to.
 */
int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_get_erc_db() - Fetch the write offset of the given event
 *		ring.
 * @ring: Event ring.
 * @wr_offset: Pointer the write offset is stored to.
 */
int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_get_cmd_db() - Fetch the write offset of the command ring.
 * @ring: Command ring.
 * @wr_offset: Pointer the write offset is stored to.
 */
int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_set_env() - Write the execution environment.
 * @dev: MHI device structure.
 * @value: Value of the execution environment.
 */
int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value);

/**
 * mhi_dev_mmio_reset() - Reset the MMIO, done as part of initialization.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_reset(struct mhi_dev *dev);

/**
 * mhi_dev_get_mhi_addr() - Fetches the data and control regions from the
 *		host.
 * @dev: MHI device structure.
 */
int mhi_dev_get_mhi_addr(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_mhi_state() - Fetches the MHI state, such as M0/M1/M2/M3.
 * @dev: MHI device structure.
 * @state: Pointer of type mhi_dev_state.
 */
int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state);

/**
 * mhi_dev_mmio_init() - Initializes the MMIO and reads the number of event
 *		rings, the number of supported channels, and the offsets to
 *		the channel and event doorbells from the host.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_init(struct mhi_dev *dev);

/**
 * mhi_dev_update_ner() - Update the number of event rings (NER) programmed
 *		by the host.
 * @dev: MHI device structure.
 */
int mhi_dev_update_ner(struct mhi_dev *dev);

/**
 * mhi_dev_restore_mmio() - Restores the MMIO when the MHI device comes out
 *		of M3.
 * @dev: MHI device structure.
 */
int mhi_dev_restore_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_backup_mmio() - Backup the MMIO before an MHI transition to M3.
 * @dev: MHI device structure.
 */
int mhi_dev_backup_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug.
 * @dev: MHI device structure.
 */
int mhi_dev_dump_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_config_outbound_iatu() - Configure the outbound address
 *		translation unit between device and host to map the data and
 *		control regions.
 * @mhi: MHI device structure.
 */
int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi);

/**
 * mhi_dev_send_state_change_event() - Send a state change event, such as
 *		M0/M1/M2/M3, to the host.
 * @mhi: MHI device structure.
 * @state: MHI state of type mhi_dev_state.
 */
int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
		enum mhi_dev_state state);

/**
 * mhi_dev_send_ee_event() - Send an execution environment state change
 *		event to the host.
 * @mhi: MHI device structure.
 * @exec_env: MHI execution environment of type mhi_dev_execenv.
 */
int mhi_dev_send_ee_event(struct mhi_dev *mhi,
		enum mhi_dev_execenv exec_env);

/**
 * mhi_dev_syserr() - Raise a system error when unexpected events are
 *		received.
 * @mhi: MHI device structure.
 */
int mhi_dev_syserr(struct mhi_dev *mhi);

/**
 * mhi_dev_suspend() - MHI device suspend: stop channel processing at the
 *		transfer ring boundary and update the channel state to
 *		suspended.
 * @mhi: MHI device structure.
 */
int mhi_dev_suspend(struct mhi_dev *mhi);

/**
 * mhi_dev_resume() - MHI device resume: update the channel state to running.
 * @mhi: MHI device structure.
 */
int mhi_dev_resume(struct mhi_dev *mhi);

/**
 * mhi_dev_trigger_hw_acc_wakeup() - Notify the state machine that there is
 *		HW-accelerated data to be sent, preventing MHI suspend.
 * @mhi: MHI device structure.
 */
int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi);

/**
 * mhi_pcie_config_db_routing() - Configure doorbell routing for the event
 *		and channel contexts with IPA when performing an MHI resume.
 * @mhi: MHI device structure.
 */
int mhi_pcie_config_db_routing(struct mhi_dev *mhi);

/**
 * mhi_uci_init() - Initializes the user control interface (UCI), which
 *		exposes device nodes for the supported MHI software channels.
 */
int mhi_uci_init(void);

void mhi_dev_notify_a7_event(struct mhi_dev *mhi);

#endif /* __MHI_H */