/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#ifndef __MHI_H
#define __MHI_H

#include <linux/msm_ep_pcie.h>
#include <linux/types.h>
#include <linux/ipc_logging.h>
#include <linux/dma-mapping.h>

/**
 * MHI control data structures allotted by the host, including
 * channel context array, event context array, command context and rings.
 */

/* Channel context state */
enum mhi_dev_ch_ctx_state {
	MHI_DEV_CH_STATE_DISABLED,
	MHI_DEV_CH_STATE_ENABLED,
	MHI_DEV_CH_STATE_RUNNING,
	MHI_DEV_CH_STATE_SUSPENDED,
	MHI_DEV_CH_STATE_STOP,
	MHI_DEV_CH_STATE_ERROR,
	MHI_DEV_CH_STATE_RESERVED,
	MHI_DEV_CH_STATE_32BIT = 0x7FFFFFFF
};

/* Channel type */
enum mhi_dev_ch_ctx_type {
	MHI_DEV_CH_TYPE_NONE,
	MHI_DEV_CH_TYPE_OUTBOUND_CHANNEL,
	MHI_DEV_CH_TYPE_INBOUND_CHANNEL,
	MHI_DEV_CH_RESERVED
};

/* Channel context type */
struct mhi_dev_ch_ctx {
	enum mhi_dev_ch_ctx_state ch_state;
	enum mhi_dev_ch_ctx_type ch_type;
	uint32_t err_indx;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

enum mhi_dev_ring_element_type_id {
	MHI_DEV_RING_EL_INVALID = 0,
	MHI_DEV_RING_EL_NOOP = 1,
	MHI_DEV_RING_EL_TRANSFER = 2,
	MHI_DEV_RING_EL_RESET = 16,
	MHI_DEV_RING_EL_STOP = 17,
	MHI_DEV_RING_EL_START = 18,
	MHI_DEV_RING_EL_MHI_STATE_CHG = 32,
	MHI_DEV_RING_EL_CMD_COMPLETION_EVT = 33,
	MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT = 34,
	MHI_DEV_RING_EL_EE_STATE_CHANGE_NOTIFY = 64,
	MHI_DEV_RING_EL_UNDEF
};

enum mhi_dev_ring_state {
	RING_STATE_UINT = 0,
	RING_STATE_IDLE,
	RING_STATE_PENDING,
};

enum mhi_dev_ring_type {
	RING_TYPE_CMD = 0,
	RING_TYPE_ER,
	RING_TYPE_CH,
	RING_TYPE_INVAL
};

/* Event context interrupt moderation */
enum mhi_dev_evt_ctx_int_mod_timer {
	MHI_DEV_EVT_INT_MODERATION_DISABLED
};

/* Event ring type */
enum mhi_dev_evt_ctx_event_ring_type {
	MHI_DEV_EVT_TYPE_DEFAULT,
	MHI_DEV_EVT_TYPE_VALID,
	MHI_DEV_EVT_RESERVED
};

/* Event ring context type */
struct mhi_dev_ev_ctx {
	uint32_t res1:16;
	enum mhi_dev_evt_ctx_int_mod_timer intmodt:16;
	enum mhi_dev_evt_ctx_event_ring_type ertype;
	uint32_t msivec;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* Command context */
struct mhi_dev_cmd_ctx {
	uint32_t res1;
	uint32_t res2;
	uint32_t res3;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* generic context */
struct mhi_dev_gen_ctx {
	uint32_t res1;
	uint32_t res2;
	uint32_t res3;
	uint64_t rbase;
	uint64_t rlen;
	uint64_t rp;
	uint64_t wp;
} __packed;

/* Transfer ring element */
struct mhi_dev_transfer_ring_element {
	uint64_t data_buf_ptr;
	uint32_t len:16;
	uint32_t res1:16;
	uint32_t chain:1;
	uint32_t res2:7;
	uint32_t ieob:1;
	uint32_t ieot:1;
	uint32_t bei:1;
	uint32_t res3:5;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res4:8;
} __packed;

/* Command ring element */
/* Command ring No op command */
struct mhi_dev_cmd_ring_op {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring reset channel command */
struct mhi_dev_cmd_ring_reset_channel_cmd {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring stop channel command */
struct mhi_dev_cmd_ring_stop_channel_cmd {
	uint64_t res1;
	uint32_t res2;
	uint32_t res3:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command ring start channel command */
struct mhi_dev_cmd_ring_start_channel_cmd {
	uint64_t res1;
	uint32_t seqnum;
	uint32_t reliable:1;
	uint32_t res2:15;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

enum mhi_dev_cmd_completion_code {
	MHI_CMD_COMPL_CODE_INVALID = 0,
	MHI_CMD_COMPL_CODE_SUCCESS = 1,
	MHI_CMD_COMPL_CODE_EOT = 2,
	MHI_CMD_COMPL_CODE_OVERFLOW = 3,
	MHI_CMD_COMPL_CODE_EOB = 4,
	MHI_CMD_COMPL_CODE_UNDEFINED = 16,
	MHI_CMD_COMPL_CODE_RING_EL = 17,
	MHI_CMD_COMPL_CODE_RES
};

/* Event ring elements */
/* Transfer completion event */
struct mhi_dev_event_ring_transfer_completion {
	uint64_t ptr;
	uint32_t len:16;
	uint32_t res1:8;
	enum mhi_dev_cmd_completion_code code:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

/* Command completion event */
struct mhi_dev_event_ring_cmd_completion {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_cmd_completion_code code:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

enum mhi_dev_state {
	MHI_DEV_RESET_STATE = 0,
	MHI_DEV_READY_STATE,
	MHI_DEV_M0_STATE,
	MHI_DEV_M1_STATE,
	MHI_DEV_M2_STATE,
	MHI_DEV_M3_STATE,
	MHI_DEV_MAX_STATE,
	MHI_DEV_SYSERR_STATE = 0xff
};

/* MHI state change event */
struct mhi_dev_event_ring_state_change {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_state mhistate:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

enum mhi_dev_execenv {
	MHI_DEV_SBL_EE = 1,
	MHI_DEV_AMSS_EE = 2,
	MHI_DEV_UNRESERVED
};

/* EE state change event */
struct mhi_dev_event_ring_ee_state_change {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_execenv execenv:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t res3:8;
} __packed;

/* Generic cmd to parse common details like type and channel id */
struct mhi_dev_ring_generic {
	uint64_t ptr;
	uint32_t res1:24;
	enum mhi_dev_state mhistate:8;
	uint32_t res2:16;
	enum mhi_dev_ring_element_type_id type:8;
	uint32_t chid:8;
} __packed;

struct mhi_config {
	uint32_t mhi_reg_len;
	uint32_t version;
	uint32_t event_rings;
	uint32_t channels;
	uint32_t chdb_offset;
	uint32_t erdb_offset;
};

#define NUM_CHANNELS 128
#define HW_CHANNEL_BASE 100
#define HW_CHANNEL_END 107
#define MHI_ENV_VALUE 2
#define MHI_MASK_ROWS_CH_EV_DB 4
#define TRB_MAX_DATA_SIZE 8192
#define MHI_CTRL_STATE 100
#define IPA_DMA_SYNC 1
#define IPA_DMA_ASYNC 0

/* Maximum transfer completion events buffer */
#define MAX_TR_EVENTS 50
/* Maximum event requests */
#define MHI_MAX_EVT_REQ 50

/* Possible ring element types */
union mhi_dev_ring_element_type {
	struct mhi_dev_cmd_ring_op cmd_no_op;
	struct mhi_dev_cmd_ring_reset_channel_cmd cmd_reset;
	struct mhi_dev_cmd_ring_stop_channel_cmd cmd_stop;
	struct mhi_dev_cmd_ring_start_channel_cmd cmd_start;
	struct mhi_dev_transfer_ring_element tre;
	struct mhi_dev_event_ring_transfer_completion evt_tr_comp;
	struct mhi_dev_event_ring_cmd_completion evt_cmd_comp;
	struct mhi_dev_event_ring_state_change evt_state_change;
	struct mhi_dev_event_ring_ee_state_change evt_ee_state;
	struct mhi_dev_ring_generic generic;
};
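
/*
 * Illustrative sketch (not part of the driver): how a transfer completion
 * event element might be populated before being sent to the host. The
 * variable names (tre_host_pa, bytes_xferd, ch_id) are hypothetical.
 *
 *	union mhi_dev_ring_element_type ev = {0};
 *
 *	ev.evt_tr_comp.ptr = tre_host_pa;
 *	ev.evt_tr_comp.len = bytes_xferd;
 *	ev.evt_tr_comp.code = MHI_CMD_COMPL_CODE_EOT;
 *	ev.evt_tr_comp.type = MHI_DEV_RING_EL_TRANSFER_COMPLETION_EVENT;
 *	ev.evt_tr_comp.chid = ch_id;
 */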

/* Transfer ring element type */
union mhi_dev_ring_ctx {
	struct mhi_dev_cmd_ctx cmd;
	struct mhi_dev_ev_ctx ev;
	struct mhi_dev_ch_ctx ch;
	struct mhi_dev_gen_ctx generic;
};

/* MHI host Control and data address region */
struct mhi_host_addr {
	uint32_t ctrl_base_lsb;
	uint32_t ctrl_base_msb;
	uint32_t ctrl_limit_lsb;
	uint32_t ctrl_limit_msb;
	uint32_t data_base_lsb;
	uint32_t data_base_msb;
	uint32_t data_limit_lsb;
	uint32_t data_limit_msb;
};

/* MHI physical and virtual address region */
struct mhi_meminfo {
	struct device *dev;
	uintptr_t pa_aligned;
	uintptr_t pa_unaligned;
	uintptr_t va_aligned;
	uintptr_t va_unaligned;
	uintptr_t size;
};

struct mhi_addr {
	uint64_t host_pa;
	uintptr_t device_pa;
	uintptr_t device_va;
	size_t size;
	dma_addr_t phy_addr;
	void *virt_addr;
	bool use_ipa_dma;
};

struct mhi_interrupt_state {
	uint32_t mask;
	uint32_t status;
};

enum mhi_dev_channel_state {
	MHI_DEV_CH_UNINT,
	MHI_DEV_CH_STARTED,
	MHI_DEV_CH_PENDING_START,
	MHI_DEV_CH_PENDING_STOP,
	MHI_DEV_CH_STOPPED,
	MHI_DEV_CH_CLOSED,
};

enum mhi_dev_ch_operation {
	MHI_DEV_OPEN_CH,
	MHI_DEV_CLOSE_CH,
	MHI_DEV_READ_CH,
	MHI_DEV_READ_WR,
	MHI_DEV_POLL,
};

enum mhi_ctrl_info {
	MHI_STATE_CONFIGURED = 0,
	MHI_STATE_CONNECTED = 1,
	MHI_STATE_DISCONNECTED = 2,
	MHI_STATE_INVAL,
};

enum mhi_dev_tr_compl_evt_type {
	SEND_EVENT_BUFFER,
	SEND_EVENT_RD_OFFSET,
};

enum mhi_dev_transfer_type {
	MHI_DEV_DMA_SYNC,
	MHI_DEV_DMA_ASYNC,
};

struct mhi_dev_channel;

struct mhi_dev_ring {
	struct list_head list;
	struct mhi_dev *mhi_dev;

	uint32_t id;
	uint32_t rd_offset;
	uint32_t wr_offset;
	uint32_t ring_size;

	enum mhi_dev_ring_type type;
	enum mhi_dev_ring_state state;

	/* device virtual address location of the cached host ring ctx data */
	union mhi_dev_ring_element_type *ring_cache;
	/* Physical address of the cached ring copy on the device side */
	dma_addr_t ring_cache_dma_handle;
	/* Physical address of the host where we will write/read to/from */
	struct mhi_addr ring_shadow;
	/* Ring type - cmd, event, transfer ring and its rp/wp... */
	union mhi_dev_ring_ctx *ring_ctx;
	/* ring_ctx_shadow -> tracking ring_ctx in the host */
	union mhi_dev_ring_ctx *ring_ctx_shadow;
	void (*ring_cb)(struct mhi_dev *dev,
			union mhi_dev_ring_element_type *el,
			void *ctx);
};

static inline void mhi_dev_ring_inc_index(struct mhi_dev_ring *ring,
						uint32_t rd_offset)
{
	ring->rd_offset++;
	if (ring->rd_offset == ring->ring_size)
		ring->rd_offset = 0;
}

/* Trace information planned to be used for read/write */
#define TRACE_DATA_MAX 128
#define MHI_DEV_DATA_MAX 512

#define MHI_DEV_MMIO_RANGE 0xc80

enum cb_reason {
	MHI_DEV_TRE_AVAILABLE = 0,
	MHI_DEV_CTRL_UPDATE,
};

struct mhi_dev_client_cb_reason {
	uint32_t ch_id;
	enum cb_reason reason;
};

struct mhi_dev_client {
	struct list_head list;
	struct mhi_dev_channel *channel;
	void (*event_trigger)(struct mhi_dev_client_cb_reason *cb);

	/* mhi_dev calls are fully synchronous -- only one call may be
	 * active per client at a time for now.
	 */
	struct mutex write_lock;
	wait_queue_head_t wait;

	/* trace logs */
	spinlock_t tr_lock;
	unsigned int tr_head;
	unsigned int tr_tail;
	struct mhi_dev_trace *tr_log;

	/* client buffers */
	struct mhi_dev_iov *iov;
	uint32_t nr_iov;
};

struct ring_cache_req {
	struct completion *done;
	void *context;
};

struct event_req {
	union mhi_dev_ring_element_type *tr_events;
	u32 num_events;
	dma_addr_t dma;
	u32 dma_len;
	dma_addr_t event_rd_dma;
	void *context;
	enum mhi_dev_tr_compl_evt_type event_type;
	u32 event_ring;
	void (*client_cb)(void *req);
	struct list_head list;
};

struct mhi_dev_channel {
	struct list_head list;
	struct list_head clients;
	/* synchronization for changing channel state,
	 * adding/removing clients, mhi_dev callbacks, etc
	 */
	struct mhi_dev_ring *ring;

	enum mhi_dev_channel_state state;
	uint32_t ch_id;
	enum mhi_dev_ch_ctx_type ch_type;
	struct mutex ch_lock;
	/* client which the current inbound/outbound message is for */
	struct mhi_dev_client *active_client;

	struct list_head event_req_buffers;
	struct event_req *curr_ereq;

	/* current TRE being processed */
	uint64_t tre_loc;
	/* current TRE size */
	uint32_t tre_size;
	/* tre bytes left to read/write */
	uint32_t tre_bytes_left;
	/* td size being read/written from/to so far */
	uint32_t td_size;
	bool wr_request_active;
	bool skip_td;
};

/* Structure for the MHI device */
struct mhi_dev {
	struct platform_device *pdev;
	struct device *dev;
	/* MHI MMIO related members */
	phys_addr_t mmio_base_pa_addr;
	void *mmio_base_addr;
	phys_addr_t ipa_uc_mbox_crdb;
	phys_addr_t ipa_uc_mbox_erdb;

	uint32_t *mmio_backup;
	struct mhi_config cfg;
	bool mmio_initialized;

	spinlock_t lock;
	/* Host control base information */
	struct mhi_host_addr host_addr;
	struct mhi_addr ctrl_base;
	struct mhi_addr data_base;
	struct mhi_addr ch_ctx_shadow;
	struct mhi_dev_ch_ctx *ch_ctx_cache;
	dma_addr_t ch_ctx_cache_dma_handle;
	struct mhi_addr ev_ctx_shadow;
	struct mhi_dev_ch_ctx *ev_ctx_cache;
	dma_addr_t ev_ctx_cache_dma_handle;

	struct mhi_addr cmd_ctx_shadow;
	struct mhi_dev_ch_ctx *cmd_ctx_cache;
	dma_addr_t cmd_ctx_cache_dma_handle;
	struct mhi_dev_ring *ring;
	int mhi_irq;
	struct mhi_dev_channel *ch;

	int ctrl_int;
	int cmd_int;
	/* CHDB and EVDB device interrupt state */
	struct mhi_interrupt_state chdb[4];
	struct mhi_interrupt_state evdb[4];

	/* Scheduler work */
	struct work_struct chdb_ctrl_work;

	struct mutex mhi_lock;
	struct mutex mhi_event_lock;

	/* process a ring element */
	struct workqueue_struct *pending_ring_wq;
	struct work_struct pending_work;

	struct list_head event_ring_list;
	struct list_head process_ring_list;

	uint32_t cmd_ring_idx;
	uint32_t ev_ring_start;
	uint32_t ch_ring_start;

	/* IPA Handles */
	u32 ipa_clnt_hndl[4];
	struct workqueue_struct *ring_init_wq;
	struct work_struct ring_init_cb_work;
	struct work_struct re_init;

	/* EP PCIe registration */
	struct workqueue_struct *pcie_event_wq;
	struct ep_pcie_register_event event_reg;
	u32 ifc_id;
	struct ep_pcie_hw *phandle;
	struct work_struct pcie_event;
	struct ep_pcie_msi_config msi_cfg;

	atomic_t write_active;
	atomic_t is_suspended;
	atomic_t mhi_dev_wake;
	atomic_t re_init_done;
	struct mutex mhi_write_test;
	u32 device_local_pa_base;
	u32 mhi_ep_msi_num;
	u32 mhi_version;
	void *dma_cache;
	void *read_handle;
	void *write_handle;
	/* Physical scratch buffer for writing control data to the host */
	dma_addr_t cache_dma_handle;
	/*
	 * Physical scratch buffer address used when reading data
	 * from the host in mhi_read()
	 */
	dma_addr_t read_dma_handle;
	/*
	 * Physical scratch buffer address used when writing to the host
	 * region from the device in mhi_write()
	 */
	dma_addr_t write_dma_handle;

	/* Use IPA DMA for Software channel data transfer */
	bool use_ipa;

	/* iATU is required to map control and data region */
	bool config_iatu;

	/* MHI state info */
	enum mhi_ctrl_info ctrl_info;

	/* Register for interrupt */
	bool mhi_int;
	/* Registered client callback list */
	struct list_head client_cb_list;

	struct kobj_uevent_env kobj_env;
};

struct mhi_req {
	u32 chan;
	u32 mode;
	u32 chain;
	void *buf;
	dma_addr_t dma;
	u32 snd_cmpl;
	void *context;
	size_t len;
	size_t actual_len;
	uint32_t rd_offset;
	struct mhi_dev_client *client;
	struct list_head list;
	union mhi_dev_ring_element_type *el;
	void (*client_cb)(void *req);
};

enum mhi_msg_level {
	MHI_MSG_VERBOSE = 0x0,
	MHI_MSG_INFO = 0x1,
	MHI_MSG_DBG = 0x2,
	MHI_MSG_WARNING = 0x3,
	MHI_MSG_ERROR = 0x4,
	MHI_MSG_CRITICAL = 0x5,
	MHI_MSG_reserved = 0x80000000
};

extern enum mhi_msg_level mhi_msg_lvl;
extern enum mhi_msg_level mhi_ipc_msg_lvl;
extern void *mhi_ipc_log;

#define mhi_log(_msg_lvl, _msg, ...) do { \
	if (_msg_lvl >= mhi_msg_lvl) { \
		pr_err("[%s] "_msg, __func__, ##__VA_ARGS__); \
	} \
	if (mhi_ipc_log && (_msg_lvl >= mhi_ipc_msg_lvl)) { \
		ipc_log_string(mhi_ipc_log, \
			"[%s] " _msg, __func__, ##__VA_ARGS__); \
	} \
} while (0)
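
/*
 * Usage sketch (illustrative only): messages at or above mhi_msg_lvl go to
 * the kernel log, and messages at or above mhi_ipc_msg_lvl go to the IPC
 * log buffer. The channel id variable below is hypothetical.
 *
 *	mhi_log(MHI_MSG_VERBOSE, "Processing ring for ch %d\n", ch_id);
 *	mhi_log(MHI_MSG_ERROR, "Invalid MHI state\n");
 */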

/* SW channel client list */
enum mhi_client_channel {
	MHI_CLIENT_LOOPBACK_OUT = 0,
	MHI_CLIENT_LOOPBACK_IN = 1,
	MHI_CLIENT_SAHARA_OUT = 2,
	MHI_CLIENT_SAHARA_IN = 3,
	MHI_CLIENT_DIAG_OUT = 4,
	MHI_CLIENT_DIAG_IN = 5,
	MHI_CLIENT_SSR_OUT = 6,
	MHI_CLIENT_SSR_IN = 7,
	MHI_CLIENT_QDSS_OUT = 8,
	MHI_CLIENT_QDSS_IN = 9,
	MHI_CLIENT_EFS_OUT = 10,
	MHI_CLIENT_EFS_IN = 11,
	MHI_CLIENT_MBIM_OUT = 12,
	MHI_CLIENT_MBIM_IN = 13,
	MHI_CLIENT_QMI_OUT = 14,
	MHI_CLIENT_QMI_IN = 15,
	MHI_CLIENT_IP_CTRL_0_OUT = 16,
	MHI_CLIENT_IP_CTRL_0_IN = 17,
	MHI_CLIENT_IP_CTRL_1_OUT = 18,
	MHI_CLIENT_IP_CTRL_1_IN = 19,
	MHI_CLIENT_DCI_OUT = 20,
	MHI_CLIENT_DCI_IN = 21,
	MHI_CLIENT_IP_CTRL_3_OUT = 22,
	MHI_CLIENT_IP_CTRL_3_IN = 23,
	MHI_CLIENT_IP_CTRL_4_OUT = 24,
	MHI_CLIENT_IP_CTRL_4_IN = 25,
	MHI_CLIENT_IP_CTRL_5_OUT = 26,
	MHI_CLIENT_IP_CTRL_5_IN = 27,
	MHI_CLIENT_IP_CTRL_6_OUT = 28,
	MHI_CLIENT_IP_CTRL_6_IN = 29,
	MHI_CLIENT_IP_CTRL_7_OUT = 30,
	MHI_CLIENT_IP_CTRL_7_IN = 31,
	MHI_CLIENT_DUN_OUT = 32,
	MHI_CLIENT_DUN_IN = 33,
	MHI_CLIENT_IP_SW_0_OUT = 34,
	MHI_CLIENT_IP_SW_0_IN = 35,
	MHI_CLIENT_IP_SW_1_OUT = 36,
	MHI_CLIENT_IP_SW_1_IN = 37,
	MHI_CLIENT_IP_SW_2_OUT = 38,
	MHI_CLIENT_IP_SW_2_IN = 39,
	MHI_CLIENT_IP_SW_3_OUT = 40,
	MHI_CLIENT_IP_SW_3_IN = 41,
	MHI_CLIENT_CSVT_OUT = 42,
	MHI_CLIENT_CSVT_IN = 43,
	MHI_CLIENT_SMCT_OUT = 44,
	MHI_CLIENT_SMCT_IN = 45,
	MHI_CLIENT_IP_SW_4_OUT = 46,
	MHI_CLIENT_IP_SW_4_IN = 47,
	MHI_MAX_SOFTWARE_CHANNELS = 48,
	MHI_CLIENT_TEST_OUT = 60,
	MHI_CLIENT_TEST_IN = 61,
	MHI_CLIENT_RESERVED_1_LOWER = 62,
	MHI_CLIENT_RESERVED_1_UPPER = 99,
	MHI_CLIENT_IP_HW_0_OUT = 100,
	MHI_CLIENT_IP_HW_0_IN = 101,
	MHI_CLIENT_RESERVED_2_LOWER = 102,
	MHI_CLIENT_RESERVED_2_UPPER = 127,
	MHI_MAX_CHANNELS = 102,
};

/* Use ID 0 for legacy /dev/mhi_ctrl. Channel 0 is for internal use only */
#define MHI_DEV_UEVENT_CTRL 0

struct mhi_dev_uevent_info {
	enum mhi_client_channel channel;
	enum mhi_ctrl_info ctrl_info;
};

struct mhi_dev_iov {
	void *addr;
	uint32_t buf_size;
};

struct mhi_dev_client_cb_data {
	void *user_data;
	enum mhi_client_channel channel;
	enum mhi_ctrl_info ctrl_info;
};

typedef void (*mhi_state_cb)(struct mhi_dev_client_cb_data *cb_dat);

struct mhi_dev_ready_cb_info {
	struct list_head list;
	mhi_state_cb cb;
	struct mhi_dev_client_cb_data cb_data;
};

/**
 * mhi_dev_open_channel() - Opens a channel for a given client, done prior
 *		to read/write.
 * @chan_id: Software Channel ID for the assigned client.
 * @handle_client: Pointer to the client handle populated on success.
 * @event_trigger: Client callback invoked for event notification.
 */
int mhi_dev_open_channel(uint32_t chan_id,
		struct mhi_dev_client **handle_client,
		void (*event_trigger)(struct mhi_dev_client_cb_reason *cb));
/**
 * mhi_dev_close_channel() - Channel close for a given client.
 */
int mhi_dev_close_channel(struct mhi_dev_client *handle_client);

/**
 * mhi_dev_read_channel() - Channel read for a given client.
 * @mreq: Client request which includes meta info such as the location to
 *		write the read data to, buffer length, read offset, mode,
 *		chain, and the client callback invoked when the data read
 *		is completed.
 */
int mhi_dev_read_channel(struct mhi_req *mreq);

/**
 * mhi_dev_write_channel() - Channel write for a given software client.
 * @wreq: Client request which includes meta info such as the client handle,
 *		read data location, buffer length, mode, and the client
 *		callback which frees the packet when the data write is
 *		completed.
 */
int mhi_dev_write_channel(struct mhi_req *wreq);

/**
 * mhi_dev_channel_isempty() - Checks if there are any pending TREs to process.
 * @handle: Client handle issued during mhi_dev_open_channel.
 */
int mhi_dev_channel_isempty(struct mhi_dev_client *handle);
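
/*
 * Illustrative usage sketch for the software channel client API above
 * (not part of the driver). The callback, buffer, and field usage shown
 * here are hypothetical placeholders.
 *
 *	static void my_event_cb(struct mhi_dev_client_cb_reason *reason)
 *	{
 *		if (reason->reason == MHI_DEV_TRE_AVAILABLE)
 *			pr_debug("TRE available on ch %d\n", reason->ch_id);
 *	}
 *
 *	static void my_write_done(void *req)
 *	{
 *		struct mhi_req *wreq = req;
 *
 *		kfree(wreq->buf);
 *	}
 *
 *	struct mhi_dev_client *client;
 *	struct mhi_req wreq = {0};
 *	int rc;
 *
 *	rc = mhi_dev_open_channel(MHI_CLIENT_LOOPBACK_IN, &client,
 *			my_event_cb);
 *	if (rc)
 *		return rc;
 *
 *	wreq.client = client;
 *	wreq.chan = MHI_CLIENT_LOOPBACK_IN;
 *	wreq.buf = data_buf;
 *	wreq.len = data_len;
 *	wreq.mode = IPA_DMA_SYNC;
 *	wreq.client_cb = my_write_done;
 *
 *	rc = mhi_dev_write_channel(&wreq);
 *
 *	mhi_dev_close_channel(client);
 */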

struct mhi_dev_trace {
	unsigned int timestamp;
	uint32_t data[TRACE_DATA_MAX];
};

/* MHI Ring related functions */

/**
 * mhi_ring_init() - Initializes the ring id to the default un-initialized
 *		state. Once a start command is received, the respective ring
 *		is then prepared by fetching the context and updating the
 *		offset.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @type: Command/Event or Channel transfer ring.
 * @id: Index to the ring id. For command it is usually 1, Event rings
 *		may vary from 1 to 128. Channels vary from 1 to 256.
 */
void mhi_ring_init(struct mhi_dev_ring *ring,
		enum mhi_dev_ring_type type, int id);

/**
 * mhi_ring_start() - Fetches the respective transfer ring's context from
 *		the host and updates the write offset.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @ctx: Transfer ring of type mhi_dev_ring_ctx.
 * @dev: MHI device structure.
 */
int mhi_ring_start(struct mhi_dev_ring *ring,
		union mhi_dev_ring_ctx *ctx, struct mhi_dev *mhi);
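
/*
 * Illustrative sketch (not part of the driver): a command ring might be
 * initialized during probe and started once the host signals readiness.
 * The use of cmd_ring_idx and the cast of the cached command context are
 * assumptions for illustration only.
 *
 *	int rc;
 *
 *	mhi_ring_init(&mhi->ring[mhi->cmd_ring_idx], RING_TYPE_CMD,
 *			mhi->cmd_ring_idx);
 *
 *	rc = mhi_ring_start(&mhi->ring[mhi->cmd_ring_idx],
 *			(union mhi_dev_ring_ctx *)mhi->cmd_ctx_cache, mhi);
 */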

/**
 * mhi_dev_cache_ring() - Cache the data for the corresponding ring locally.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @wr_offset: Cache the TREs up to the write offset value.
 */
int mhi_dev_cache_ring(struct mhi_dev_ring *ring, uint32_t wr_offset);

/**
 * mhi_dev_update_wr_offset() - Check for any updates in the write offset.
 * @ring: Ring for the respective context - Channel/Event/Command.
 */
int mhi_dev_update_wr_offset(struct mhi_dev_ring *ring);

/**
 * mhi_dev_process_ring() - Update the write pointer, fetch the ring elements
 *		and invoke the client's callback.
 * @ring: Ring for the respective context - Channel/Event/Command.
 */
int mhi_dev_process_ring(struct mhi_dev_ring *ring);

/**
 * mhi_dev_process_ring_element() - Fetch the ring elements and invoke the
 *		client's callback.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @offset: Offset index into the respective ring's cache element.
 */
int mhi_dev_process_ring_element(struct mhi_dev_ring *ring, uint32_t offset);

/**
 * mhi_dev_add_element() - Copy the element to the respective transfer ring's
 *		read pointer and increment the index.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @element: Transfer ring element to be copied to the host memory.
 */
int mhi_dev_add_element(struct mhi_dev_ring *ring,
		union mhi_dev_ring_element_type *element,
		struct event_req *ereq, int evt_offset);
/**
 * mhi_transfer_device_to_host() - memcpy equivalent API to transfer data
 *		from device to the host.
 * @dst_pa: Physical destination address.
 * @src: Source virtual address.
 * @len: Number of bytes to be transferred.
 * @mhi: MHI dev structure.
 * @req: mhi_req structure.
 */
int mhi_transfer_device_to_host(uint64_t dst_pa, void *src, uint32_t len,
		struct mhi_dev *mhi, struct mhi_req *req);

/**
 * mhi_transfer_host_to_device() - memcpy equivalent API to transfer data
 *		from host to the device.
 * @device: Destination virtual address on the device.
 * @src_pa: Source physical address.
 * @len: Number of bytes to be transferred.
 * @mhi: MHI dev structure.
 * @mreq: mhi_req structure.
 */
int mhi_transfer_host_to_device(void *device, uint64_t src_pa, uint32_t len,
		struct mhi_dev *mhi, struct mhi_req *mreq);
| Siddartha Mohanadoss | 603f765 | 2017-01-26 15:59:41 -0800 | [diff] [blame] | 873 | |
| 874 | /** |
| Siddartha Mohanadoss | e095433 | 2018-01-15 14:03:03 -0800 | [diff] [blame] | 875 | * mhi_dev_write_to_host() - Transfer data from device to host. |
| 876 | * Based on support available, either IPA DMA or memcpy is used. |
| Siddartha Mohanadoss | 603f765 | 2017-01-26 15:59:41 -0800 | [diff] [blame] | 877 | * @host: Host and device address details. |
| 878 | * @buf: Data buffer that needs to be written to the host. |
| 879 | * @size: Data buffer size. |
| 880 | */ |
| Siddartha Mohanadoss | e095433 | 2018-01-15 14:03:03 -0800 | [diff] [blame] | 881 | void mhi_dev_write_to_host(struct mhi_dev *mhi, struct mhi_addr *mhi_transfer, |
| 882 | struct event_req *ereq, enum mhi_dev_transfer_type type); |
/**
 * mhi_dev_read_from_host() - memcpy equivalent API to transfer data
 *		from host to device.
 * @mhi: MHI device structure.
 * @mhi_transfer: Host and device address details.
 */
void mhi_dev_read_from_host(struct mhi_dev *mhi,
		struct mhi_addr *mhi_transfer);

/**
 * mhi_ring_set_cb() - Sets the ring callback to be invoked when a ring
 *		element is processed.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @ring_cb: Callback invoked for each processed ring element.
 */
void mhi_ring_set_cb(struct mhi_dev_ring *ring,
		void (*ring_cb)(struct mhi_dev *dev,
		union mhi_dev_ring_element_type *el, void *ctx));

/**
 * mhi_ring_set_state() - Sets internal state of the ring for tracking whether
 *		a ring is being processed, idle or uninitialized.
 * @ring: Ring for the respective context - Channel/Event/Command.
 * @state: state of type mhi_dev_ring_state.
 */
void mhi_ring_set_state(struct mhi_dev_ring *ring,
		enum mhi_dev_ring_state state);

/**
 * mhi_ring_get_state() - Obtains the internal state of the ring.
 * @ring: Ring for the respective context - Channel/Event/Command.
 */
enum mhi_dev_ring_state mhi_ring_get_state(struct mhi_dev_ring *ring);

/* MMIO related functions */

/**
 * mhi_dev_mmio_read() - Generic MHI MMIO register read API.
 * @dev: MHI device structure.
 * @offset: MHI address offset from base.
 * @reg_value: Pointer where the read register value is stored.
 */
int mhi_dev_mmio_read(struct mhi_dev *dev, uint32_t offset,
		uint32_t *reg_value);

/**
 * mhi_dev_mmio_write() - Generic MHI MMIO register write API.
 * @dev: MHI device structure.
 * @offset: MHI address offset from base.
 * @val: Value to be written to the register offset.
 */
int mhi_dev_mmio_write(struct mhi_dev *dev, uint32_t offset,
		uint32_t val);

/**
 * mhi_dev_mmio_masked_write() - Generic MHI MMIO register write masked API.
 * @dev: MHI device structure.
 * @offset: MHI address offset from base.
 * @mask: Register field mask.
 * @shift: Register field mask shift value.
 * @val: Value to be written to the register offset.
 */
int mhi_dev_mmio_masked_write(struct mhi_dev *dev, uint32_t offset,
		uint32_t mask, uint32_t shift,
		uint32_t val);

/**
 * mhi_dev_mmio_masked_read() - Generic MHI MMIO register read masked API.
 * @dev: MHI device structure.
 * @offset: MHI address offset from base.
 * @mask: Register field mask.
 * @shift: Register field mask shift value.
 * @reg_val: Pointer where the read register field value is stored.
 */
int mhi_dev_mmio_masked_read(struct mhi_dev *dev, uint32_t offset,
		uint32_t mask, uint32_t shift,
		uint32_t *reg_val);
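
/*
 * Illustrative sketch (not part of the driver): reading and updating a
 * single register field with the masked helpers. The register offset,
 * mask and shift macros below are hypothetical placeholders, not real
 * MHI MMIO register definitions.
 *
 *	uint32_t field;
 *
 *	mhi_dev_mmio_masked_read(mhi, MHICTRL_REG_OFFSET,
 *			MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT,
 *			&field);
 *
 *	mhi_dev_mmio_masked_write(mhi, MHICTRL_REG_OFFSET,
 *			MHICTRL_MHISTATE_MASK, MHICTRL_MHISTATE_SHIFT,
 *			MHI_DEV_M0_STATE);
 */
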
/**
 * mhi_dev_mmio_enable_ctrl_interrupt() - Enable Control interrupt.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_enable_ctrl_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_disable_ctrl_interrupt() - Disable Control interrupt.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_disable_ctrl_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_ctrl_status_interrupt() - Read Control interrupt status.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_read_ctrl_status_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_cmdb_interrupt() - Enable Command doorbell interrupt.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_enable_cmdb_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_disable_cmdb_interrupt() - Disable Command doorbell interrupt.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_disable_cmdb_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_cmdb_status_interrupt() - Read Command doorbell status.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_read_cmdb_status_interrupt(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_chdb_a7() - Enable Channel doorbell for a given
 *		channel id.
 * @dev: MHI device structure.
 * @chdb_id: Channel id number.
 */
int mhi_dev_mmio_enable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);

/**
 * mhi_dev_mmio_disable_chdb_a7() - Disable Channel doorbell for a given
 *		channel id.
 * @dev: MHI device structure.
 * @chdb_id: Channel id number.
 */
int mhi_dev_mmio_disable_chdb_a7(struct mhi_dev *dev, uint32_t chdb_id);

/**
 * mhi_dev_mmio_enable_erdb_a7() - Enable Event ring doorbell for a given
 *		event ring id.
 * @dev: MHI device structure.
 * @erdb_id: Event ring id number.
 */
int mhi_dev_mmio_enable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);

/**
 * mhi_dev_mmio_disable_erdb_a7() - Disable Event ring doorbell for a given
 *		event ring id.
 * @dev: MHI device structure.
 * @erdb_id: Event ring id number.
 */
int mhi_dev_mmio_disable_erdb_a7(struct mhi_dev *dev, uint32_t erdb_id);

/**
 * mhi_dev_mmio_enable_chdb_interrupts() - Enable all Channel doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_enable_chdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_chdb_interrupts() - Mask all Channel doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_mask_chdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_chdb_status_interrupts() - Read all Channel doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_read_chdb_status_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_enable_erdb_interrupts() - Enable all Event doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_enable_erdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_mask_erdb_interrupts() - Mask all Event doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_mask_erdb_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_read_erdb_status_interrupts() - Read all Event doorbell
 *		interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_read_erdb_status_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_clear_interrupts() - Clear all doorbell interrupts.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_clear_interrupts(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_chc_base() - Fetch the Channel ring context base address.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_get_chc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_erc_base() - Fetch the Event ring context base address.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_get_erc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_crc_base() - Fetch the Command ring context base address.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_get_crc_base(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_ch_db() - Fetch the Write offset of the Channel ring ID.
 * @ring: Channel ring for which the doorbell offset is read.
 * @wr_offset: Pointer of the write offset to be written to.
 */
int mhi_dev_mmio_get_ch_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_get_erc_db() - Fetch the Write offset of the Event ring ID.
 * @ring: Event ring for which the doorbell offset is read.
 * @wr_offset: Pointer of the write offset to be written to.
 */
int mhi_dev_mmio_get_erc_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_get_cmd_db() - Fetch the Write offset of the Command ring ID.
 * @ring: Command ring for which the doorbell offset is read.
 * @wr_offset: Pointer of the write offset to be written to.
 */
int mhi_dev_mmio_get_cmd_db(struct mhi_dev_ring *ring, uint64_t *wr_offset);

/**
 * mhi_dev_mmio_set_env() - Write the Execution Environment.
 * @dev: MHI device structure.
 * @value: Value of the Execution Environment.
 */
int mhi_dev_mmio_set_env(struct mhi_dev *dev, uint32_t value);

/**
 * mhi_dev_mmio_reset() - Reset the MMIO done as part of initialization.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_reset(struct mhi_dev *dev);

/**
 * mhi_dev_get_mhi_addr() - Fetches the Data and Control region from the Host.
 * @dev: MHI device structure.
 */
int mhi_dev_get_mhi_addr(struct mhi_dev *dev);

/**
 * mhi_dev_mmio_get_mhi_state() - Fetches the MHI state such as M0/M1/M2/M3.
 * @dev: MHI device structure.
 * @state: Pointer of type mhi_dev_state.
 * @mhi_reset: MHI device reset from host.
 */
int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state,
		bool *mhi_reset);

/**
 * mhi_dev_mmio_init() - Initializes the MMIO and reads the Number of event
 *		rings, supported number of channels, and offsets to the Channel
 *		and Event doorbell from the host.
 * @dev: MHI device structure.
 */
int mhi_dev_mmio_init(struct mhi_dev *dev);

/**
 * mhi_dev_update_ner() - Update the number of event rings (NER) programmed by
 *		the host.
 * @dev: MHI device structure.
 */
int mhi_dev_update_ner(struct mhi_dev *dev);

/**
 * mhi_dev_restore_mmio() - Restores the MMIO when MHI device comes out of M3.
 * @dev: MHI device structure.
 */
int mhi_dev_restore_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_backup_mmio() - Backup MMIO before a MHI transition to M3.
 * @dev: MHI device structure.
 */
int mhi_dev_backup_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_dump_mmio() - Memory dump of the MMIO region for debug.
 * @dev: MHI device structure.
 */
int mhi_dev_dump_mmio(struct mhi_dev *dev);

/**
 * mhi_dev_config_outbound_iatu() - Configure Outbound Address translation
 *		unit between device and host to map the Data and Control
 *		information.
 * @dev: MHI device structure.
 */
int mhi_dev_config_outbound_iatu(struct mhi_dev *mhi);

/**
 * mhi_dev_send_state_change_event() - Send state change event to the host
 *		such as M0/M1/M2/M3.
 * @dev: MHI device structure.
 * @state: MHI state of type mhi_dev_state.
 */
int mhi_dev_send_state_change_event(struct mhi_dev *mhi,
		enum mhi_dev_state state);
/**
 * mhi_dev_send_ee_event() - Send Execution environment state change
 *		event to the host.
 * @dev: MHI device structure.
 * @state: MHI state of type mhi_dev_execenv.
 */
int mhi_dev_send_ee_event(struct mhi_dev *mhi,
		enum mhi_dev_execenv exec_env);
/**
 * mhi_dev_syserr() - System error when unexpected events are received.
 * @dev: MHI device structure.
 */
int mhi_dev_syserr(struct mhi_dev *mhi);

/**
 * mhi_dev_suspend() - MHI device suspend to stop channel processing at the
 *		Transfer ring boundary, update the channel state to suspended.
 * @dev: MHI device structure.
 */
int mhi_dev_suspend(struct mhi_dev *mhi);

/**
 * mhi_dev_resume() - MHI device resume to update the channel state to running.
 * @dev: MHI device structure.
 */
int mhi_dev_resume(struct mhi_dev *mhi);

/**
 * mhi_dev_trigger_hw_acc_wakeup() - Notify the state machine that there is
 *		HW accelerated data to be sent and prevent MHI suspend.
 * @dev: MHI device structure.
 */
int mhi_dev_trigger_hw_acc_wakeup(struct mhi_dev *mhi);

/**
 * mhi_pcie_config_db_routing() - Configure Doorbell for Event and Channel
 *		context with IPA when performing a MHI resume.
 * @dev: MHI device structure.
 */
int mhi_pcie_config_db_routing(struct mhi_dev *mhi);

/**
 * mhi_uci_init() - Initializes the User control interface (UCI) which
 *		exposes device nodes for the supported MHI software
 *		channels.
 */
int mhi_uci_init(void);

/**
 * mhi_dev_net_interface_init() - Initializes the MHI device network interface
 *		which exposes the virtual network interface (mhi_dev_net0).
 *		Data packets are transferred between the MHI host interface
 *		(mhi_swip) and the mhi_dev_net interface using the software
 *		path.
 */
int mhi_dev_net_interface_init(void);

/**
 * mhi_dev_net_exit() - Clean up and close MHI Network interface module.
 */
void mhi_dev_net_exit(void);

/**
 * mhi_dev_notify_a7_event() - Used by the PCIe driver to notify the MHI
 *		device of an A7 interrupt after a doorbell is received, when
 *		MHI A7 interrupts are routed to PCIe instead of the MHI device.
 */
void mhi_dev_notify_a7_event(struct mhi_dev *mhi);

/**
 * mhi_ctrl_state_info() - Provide MHI state info.
 * @idx: Channel number idx. Look at channel_state_info and
 *		pass the index for the corresponding channel.
 * @info: Return the control info.
 *		MHI_STATE=CONFIGURED - MHI device is present but not ready
 *					for data traffic.
 *		MHI_STATE=CONNECTED - MHI device is ready for data transfer.
 *		MHI_STATE=DISCONNECTED - MHI device has its pipes suspended.
 */
int mhi_ctrl_state_info(uint32_t idx, uint32_t *info);
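
/*
 * Illustrative usage sketch (not part of the driver): a client could check
 * the control state for its channel before issuing transfers. The channel
 * index and the start_data_transfer() helper below are hypothetical.
 *
 *	uint32_t info = MHI_STATE_INVAL;
 *
 *	if (!mhi_ctrl_state_info(MHI_CLIENT_QMI_IN, &info) &&
 *			info == MHI_STATE_CONNECTED)
 *		start_data_transfer();
 */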

/**
 * uci_ctrl_update() - Update UCI once TREs are available for clients to
 *		consume.
 */
void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason);

/**
 * mhi_register_state_cb() - Clients can register and receive callback after
 *		MHI channel is connected or disconnected.
 */
int mhi_register_state_cb(void (*mhi_state_cb)
		(struct mhi_dev_client_cb_data *cb_data), void *data,
		enum mhi_client_channel channel);
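
/*
 * Illustrative sketch (not part of the driver): registering for channel
 * state notifications. The callback and user data names are hypothetical.
 *
 *	static void my_state_cb(struct mhi_dev_client_cb_data *cb_data)
 *	{
 *		if (cb_data->ctrl_info == MHI_STATE_CONNECTED)
 *			pr_debug("ch %d connected\n", cb_data->channel);
 *	}
 *
 *	rc = mhi_register_state_cb(my_state_cb, my_user_data,
 *			MHI_CLIENT_QMI_IN);
 */
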
#endif /* __MHI_H */