/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#ifndef _MHI_H_
#define _MHI_H_

struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct image_info;
struct bhi_vec_entry;
struct mhi_timesync;
struct mhi_buf_info;

/**
 * enum MHI_CB - MHI callback
 * @MHI_CB_IDLE: MHI entered idle state
 * @MHI_CB_PENDING_DATA: New data available for client to process
 * @MHI_CB_DTR_SIGNAL: DTR signaling update
 * @MHI_CB_LPM_ENTER: MHI host entered low power mode
 * @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
 * @MHI_CB_EE_MISSION_MODE: MHI device entered Mission Mode execution environment
 * @MHI_CB_SYS_ERROR: MHI device entered error state (may recover)
 * @MHI_CB_FATAL_ERROR: MHI device entered fatal error state
 */
enum MHI_CB {
	MHI_CB_IDLE,
	MHI_CB_PENDING_DATA,
	MHI_CB_DTR_SIGNAL,
	MHI_CB_LPM_ENTER,
	MHI_CB_LPM_EXIT,
	MHI_CB_EE_RDDM,
	MHI_CB_EE_MISSION_MODE,
	MHI_CB_SYS_ERROR,
	MHI_CB_FATAL_ERROR,
};

/**
 * enum MHI_DEBUG_LEVEL - various debugging levels
 */
enum MHI_DEBUG_LEVEL {
	MHI_MSG_LVL_VERBOSE,
	MHI_MSG_LVL_INFO,
	MHI_MSG_LVL_ERROR,
	MHI_MSG_LVL_CRITICAL,
	MHI_MSG_LVL_MASK_ALL,
};

/**
 * enum MHI_FLAGS - Transfer flags
 * @MHI_EOB: End of buffer for bulk transfer
 * @MHI_EOT: End of transfer
 * @MHI_CHAIN: Linked transfer
 */
enum MHI_FLAGS {
	MHI_EOB,
	MHI_EOT,
	MHI_CHAIN,
};

/**
 * enum mhi_device_type - Device types
 * @MHI_XFER_TYPE: Handles data transfer
 * @MHI_TIMESYNC_TYPE: Used for the time sync feature
 * @MHI_CONTROLLER_TYPE: Control device
 */
enum mhi_device_type {
	MHI_XFER_TYPE,
	MHI_TIMESYNC_TYPE,
	MHI_CONTROLLER_TYPE,
};

/**
 * enum mhi_ee - device current execution environment
 * @MHI_EE_PBL - device in PBL
 * @MHI_EE_SBL - device in SBL
 * @MHI_EE_AMSS - device in mission mode (firmware fully loaded)
 * @MHI_EE_RDDM - device in RAM dump collection mode
 * @MHI_EE_WFW - device in WLAN firmware mode
 * @MHI_EE_PTHRU - device in PBL but configured in pass thru mode
 * @MHI_EE_EDL - device in emergency download mode
 */
enum mhi_ee {
	MHI_EE_PBL,
	MHI_EE_SBL,
	MHI_EE_AMSS,
	MHI_EE_RDDM,
	MHI_EE_WFW,
	MHI_EE_PTHRU,
	MHI_EE_EDL,
	MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
	MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
	MHI_EE_NOT_SUPPORTED,
	MHI_EE_MAX,
};

/**
 * enum mhi_dev_state - device current MHI state
 */
enum mhi_dev_state {
	MHI_STATE_RESET = 0x0,
	MHI_STATE_READY = 0x1,
	MHI_STATE_M0 = 0x2,
	MHI_STATE_M1 = 0x3,
	MHI_STATE_M2 = 0x4,
	MHI_STATE_M3 = 0x5,
	MHI_STATE_M3_FAST = 0x6,
	MHI_STATE_BHI = 0x7,
	MHI_STATE_SYS_ERR = 0xFF,
	MHI_STATE_MAX,
};

/**
 * struct mhi_link_info - BW requirement
 * @target_link_speed: as defined by TLS bits in LinkControl reg
 * @target_link_width: as defined by NLW bits in LinkStatus reg
 * @sequence_num: used by device to track bw requests sent to host
 */
struct mhi_link_info {
	unsigned int target_link_speed;
	unsigned int target_link_width;
	int sequence_num;
};

#define MHI_VOTE_BUS BIT(0) /* do not disable the bus */
#define MHI_VOTE_DEVICE BIT(1) /* prevent mhi device from entering lpm */

/**
 * struct image_info - firmware and rddm table
 * @mhi_buf: Contains device firmware and rddm table
 * @bhi_vec: Points to the BHI vector table describing the image segments
 * @entries: # of entries in table
 */
struct image_info {
	struct mhi_buf *mhi_buf;
	struct bhi_vec_entry *bhi_vec;
	u32 entries;
};

/**
 * struct mhi_controller - Master controller structure for external modem
 * @dev: Device associated with this controller
 * @of_node: DT that has MHI configuration information
 * @regs: Points to base of MHI MMIO register space
 * @bhi: Points to base of MHI BHI register space
 * @bhie: Points to base of MHI BHIe register space
 * @wake_db: MHI WAKE doorbell register address
 * @dev_id: PCIe device id of the external device
 * @domain: PCIe domain the device is connected to
 * @bus: PCIe bus the device is assigned to
 * @slot: PCIe slot for the modem
 * @iova_start: IOMMU starting address for data
 * @iova_stop: IOMMU stop address for data
 * @fw_image: Firmware image name for normal booting
 * @edl_image: Firmware image name for emergency download mode
 * @fbc_download: MHI host needs to do complete image transfer
 * @rddm_size: RAM dump size that host should allocate for debugging purposes
 * @sbl_size: SBL image size
 * @seg_len: BHIe vector size
 * @fbc_image: Points to firmware image buffer
 * @rddm_image: Points to RAM dump buffer
 * @max_chan: Maximum number of channels the controller supports
 * @mhi_chan: Points to channel configuration table
 * @lpm_chans: List of channels that require LPM notifications
 * @total_ev_rings: Total # of event rings allocated
 * @hw_ev_rings: Number of hardware event rings
 * @sw_ev_rings: Number of software event rings
 * @msi_required: Number of MSI vectors required to operate
 * @msi_allocated: Number of MSI vectors allocated by bus master
 * @irq: base irq # to request
 * @mhi_event: MHI event ring configurations table
 * @mhi_cmd: MHI command ring configurations table
 * @mhi_ctxt: MHI device context, shared memory between host and device
 * @timeout_ms: Timeout in ms for state transitions
 * @pm_state: Power management state
 * @ee: MHI device execution environment
 * @dev_state: MHI device state
 * @mhi_link_info: link bandwidth requested by the device
 * @status_cb: CB function to notify various power states to bus master
 * @link_status: Query link status in case of abnormal value read from device
 * @runtime_get: Async runtime resume function
 * @runtime_put: Release runtime PM votes
 * @time_get: Return host time in us
 * @lpm_disable: Request controller to disable link level low power modes
 * @lpm_enable: Controller may enable link level low power modes again
 * @priv_data: Points to bus master's private data
 */
struct mhi_controller {
	struct list_head node;
	struct mhi_device *mhi_dev;

	/* device node for iommu ops */
	struct device *dev;
	struct device_node *of_node;

	/* mmio base */
	phys_addr_t base_addr;
	void __iomem *regs;
	void __iomem *bhi;
	void __iomem *bhie;
	void __iomem *wake_db;
	void __iomem *bw_scale_db;

	/* device topology */
	u32 dev_id;
	u32 domain;
	u32 bus;
	u32 slot;
	u32 family_number;
	u32 device_number;
	u32 major_version;
	u32 minor_version;

	/* addressing window */
	dma_addr_t iova_start;
	dma_addr_t iova_stop;

	/* fw images */
	const char *fw_image;
	const char *edl_image;

	/* mhi host manages downloading entire fbc images */
	bool fbc_download;
	size_t rddm_size;
	size_t sbl_size;
	size_t seg_len;
	u32 session_id;
	u32 sequence_id;
	struct image_info *fbc_image;
	struct image_info *rddm_image;

	/* physical channel config data */
	u32 max_chan;
	struct mhi_chan *mhi_chan;
	struct list_head lpm_chans; /* these chan require lpm notification */

	/* physical event config data */
	u32 total_ev_rings;
	u32 hw_ev_rings;
	u32 sw_ev_rings;
	u32 msi_required;
	u32 msi_allocated;
	int *irq; /* interrupt table */
	struct mhi_event *mhi_event;
	struct list_head lp_ev_rings; /* low priority event rings */

	/* cmd rings */
	struct mhi_cmd *mhi_cmd;

	/* mhi context (shared with device) */
	struct mhi_ctxt *mhi_ctxt;

	u32 timeout_ms;

	/* caller should grab pm_mutex for suspend/resume operations */
	struct mutex pm_mutex;
	bool pre_init;
	rwlock_t pm_lock;
	u32 pm_state;
	u32 saved_pm_state; /* saved state during fast suspend */
	u32 db_access; /* db access only on these states */
	enum mhi_ee ee;
	u32 ee_table[MHI_EE_MAX]; /* ee conversion from dev to host */
	enum mhi_dev_state dev_state;
	enum mhi_dev_state saved_dev_state;
	bool wake_set;
	atomic_t dev_wake;
	atomic_t alloc_size;
	atomic_t pending_pkts;
	struct list_head transition_list;
	spinlock_t transition_lock;
	spinlock_t wlock;

	/* target bandwidth info */
	struct mhi_link_info mhi_link_info;

	/* debug counters */
	u32 M0, M2, M3, M3_FAST;

	/* worker for different state transitions */
	struct work_struct st_worker;
	struct work_struct fw_worker;
	struct work_struct syserr_worker;
	struct work_struct low_priority_worker;
	wait_queue_head_t state_event;

	/* shadow functions */
	void (*status_cb)(struct mhi_controller *, void *, enum MHI_CB);
	int (*link_status)(struct mhi_controller *, void *);
	void (*wake_get)(struct mhi_controller *, bool);
	void (*wake_put)(struct mhi_controller *, bool);
	void (*wake_toggle)(struct mhi_controller *mhi_cntrl);
	int (*runtime_get)(struct mhi_controller *, void *);
	void (*runtime_put)(struct mhi_controller *, void *);
	u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
	int (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
	int (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
	int (*map_single)(struct mhi_controller *mhi_cntrl,
			  struct mhi_buf_info *buf);
	void (*unmap_single)(struct mhi_controller *mhi_cntrl,
			     struct mhi_buf_info *buf);
	void (*tsync_log)(struct mhi_controller *mhi_cntrl, u64 remote_time);
	int (*bw_scale)(struct mhi_controller *mhi_cntrl,
			struct mhi_link_info *link_info);

	/* channel to control DTR messaging */
	struct mhi_device *dtr_dev;

	/* bounce buffer settings */
	bool bounce_buf;
	size_t buffer_len;

	/* supports time sync feature */
	struct mhi_timesync *mhi_tsync;
	struct mhi_device *tsync_dev;
	u64 local_timer_freq;
	u64 remote_timer_freq;

	/* kernel log level */
	enum MHI_DEBUG_LEVEL klog_lvl;

	/* private log level for controller driver to set */
	enum MHI_DEBUG_LEVEL log_lvl;

	/* controller specific data */
	void *priv_data;
	void *log_buf;
	struct dentry *dentry;
	struct dentry *parent;
};

/**
 * struct mhi_device - mhi device structure associated with a channel
 * @dev: Device associated with the channels
 * @mtu: Maximum # of bytes the controller supports
 * @ul_chan_id: MHI channel id for UL transfer
 * @dl_chan_id: MHI channel id for DL transfer
 * @tiocm: Device current terminal settings
 * @early_notif: This device needs an early notification in case of an error
 * with the external modem.
 * @dev_vote: Keep external device in active state
 * @bus_vote: Keep physical bus (pci, spi) in active state
 * @priv_data: Driver private data
 */
struct mhi_device {
	struct device dev;
	u32 dev_id;
	u32 domain;
	u32 bus;
	u32 slot;
	size_t mtu;
	int ul_chan_id;
	int dl_chan_id;
	int ul_event_id;
	int dl_event_id;
	u32 tiocm;
	bool early_notif;
	const struct mhi_device_id *id;
	const char *chan_name;
	struct mhi_controller *mhi_cntrl;
	struct mhi_chan *ul_chan;
	struct mhi_chan *dl_chan;
	atomic_t dev_vote;
	atomic_t bus_vote;
	enum mhi_device_type dev_type;
	void *priv_data;
	int (*ul_xfer)(struct mhi_device *, struct mhi_chan *, void *,
		       size_t, enum MHI_FLAGS);
	int (*dl_xfer)(struct mhi_device *, struct mhi_chan *, void *,
		       size_t, enum MHI_FLAGS);
	void (*status_cb)(struct mhi_device *, enum MHI_CB);
};

/**
 * struct mhi_result - Completed buffer information
 * @buf_addr: Address of data buffer
 * @dir: Channel direction
 * @bytes_xferd: # of bytes transferred
 * @transaction_status: Status of the last transaction
 */
struct mhi_result {
	void *buf_addr;
	enum dma_data_direction dir;
	size_t bytes_xferd;
	int transaction_status;
};

/**
 * struct mhi_buf - Describes the buffer
 * @page: buffer as a page
 * @buf: cpu address for the buffer
 * @phys_addr: physical address of the buffer
 * @dma_addr: iommu address for the buffer
 * @skb: skb of ip packet
 * @len: # of bytes
 * @name: Buffer label, for offload channel configurations name must be:
 * ECA - Event context array data
 * CCA - Channel context array data
 */
struct mhi_buf {
	struct list_head node;
	struct page *page;
	void *buf;
	phys_addr_t phys_addr;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	size_t len;
	const char *name; /* ECA, CCA */
};

/**
 * struct mhi_driver - mhi driver information
 * @id_table: NULL terminated channel ID names
 * @ul_xfer_cb: UL data transfer callback
 * @dl_xfer_cb: DL data transfer callback
 * @status_cb: Asynchronous status callback
 */
struct mhi_driver {
	const struct mhi_device_id *id_table;
	int (*probe)(struct mhi_device *, const struct mhi_device_id *id);
	void (*remove)(struct mhi_device *);
	void (*ul_xfer_cb)(struct mhi_device *, struct mhi_result *);
	void (*dl_xfer_cb)(struct mhi_device *, struct mhi_result *);
	void (*status_cb)(struct mhi_device *, enum MHI_CB mhi_cb);
	struct device_driver driver;
};

#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)

static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
					  void *priv)
{
	mhi_dev->priv_data = priv;
}

static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
{
	return mhi_dev->priv_data;
}

/**
 * mhi_queue_transfer - Queue a buffer to hardware
 * All transfers are asynchronous transfers
 * @mhi_dev: Device associated with the channels
 * @dir: Data direction
 * @buf: Data buffer (skb for hardware channels)
 * @len: Size in bytes
 * @mflags: Interrupt flags for the device
 */
static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
				     enum dma_data_direction dir,
				     void *buf,
				     size_t len,
				     enum MHI_FLAGS mflags)
{
	if (dir == DMA_TO_DEVICE)
		return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
					mflags);
	else
		return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
					mflags);
}
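
/*
 * Illustrative sketch (not part of the API): one common pattern is for a
 * client driver to pre-queue receive buffers on the DL channel and to queue
 * TX payloads on the UL channel. The helper below is hypothetical; buffer
 * sizes, flags and error handling depend on the actual client.
 *
 *	static int my_client_queue_rx(struct mhi_device *mhi_dev, size_t mtu)
 *	{
 *		int nr_desc = mhi_get_no_free_descriptors(mhi_dev,
 *							  DMA_FROM_DEVICE);
 *
 *		while (nr_desc--) {
 *			void *buf = kmalloc(mtu, GFP_KERNEL);
 *			int ret;
 *
 *			if (!buf)
 *				return -ENOMEM;
 *
 *			ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE,
 *						 buf, mtu, MHI_EOT);
 *			if (ret) {
 *				kfree(buf);
 *				return ret;
 *			}
 *		}
 *
 *		return 0;
 *	}
 *
 * Completed buffers are returned asynchronously through the driver's
 * dl_xfer_cb()/ul_xfer_cb() callbacks with a struct mhi_result.
 */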

static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
{
	return mhi_cntrl->priv_data;
}

static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
	kfree(mhi_cntrl);
}

/**
 * mhi_driver_register - Register driver with MHI framework
 * @mhi_drv: mhi_driver structure
 */
int mhi_driver_register(struct mhi_driver *mhi_drv);

/**
 * mhi_driver_unregister - Unregister a driver for mhi_devices
 * @mhi_drv: mhi_driver structure
 */
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
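
/*
 * Minimal registration sketch for a hypothetical client driver bound to an
 * MHI channel pair. Names such as "MY_CHAN", my_probe(), my_remove() and the
 * transfer/status callbacks are illustrative only; a real driver also needs
 * its module boilerplate.
 *
 *	static int my_probe(struct mhi_device *mhi_dev,
 *			    const struct mhi_device_id *id)
 *	{
 *		struct my_drv_data *drv = kzalloc(sizeof(*drv), GFP_KERNEL);
 *
 *		if (!drv)
 *			return -ENOMEM;
 *
 *		mhi_device_set_devdata(mhi_dev, drv);
 *
 *		// move UL/DL channels from RESET to START
 *		return mhi_prepare_for_transfer(mhi_dev);
 *	}
 *
 *	static void my_remove(struct mhi_device *mhi_dev)
 *	{
 *		mhi_unprepare_from_transfer(mhi_dev);
 *		kfree(mhi_device_get_devdata(mhi_dev));
 *	}
 *
 *	static const struct mhi_device_id my_id_table[] = {
 *		{ .chan = "MY_CHAN" },
 *		{},
 *	};
 *
 *	static struct mhi_driver my_driver = {
 *		.id_table = my_id_table,
 *		.probe = my_probe,
 *		.remove = my_remove,
 *		.ul_xfer_cb = my_ul_cb,
 *		.dl_xfer_cb = my_dl_cb,
 *		.status_cb = my_status_cb,
 *		.driver = {
 *			.name = "my_mhi_client",
 *		},
 *	};
 *
 *	// typically called from module_init()/module_exit()
 *	ret = mhi_driver_register(&my_driver);
 *	...
 *	mhi_driver_unregister(&my_driver);
 */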

/**
 * mhi_device_configure - configure ECA or CCA context
 * For offload channels that the client manages, call this
 * function to configure the channel context or event context
 * array associated with the channel
 * @mhi_dev: Device associated with the channels
 * @dir: Direction of the channel
 * @mhi_buf: Configuration data
 * @elements: # of configuration elements
 */
int mhi_device_configure(struct mhi_device *mhi_dev,
			 enum dma_data_direction dir,
			 struct mhi_buf *mhi_buf,
			 int elements);

/**
 * mhi_device_get - disable low power modes
 * Only disables lpm; does not immediately exit a low power mode
 * if the controller is already in a low power mode
 * @mhi_dev: Device associated with the channels
 * @vote: requested vote (bus, device or both)
 */
void mhi_device_get(struct mhi_device *mhi_dev, int vote);

/**
 * mhi_device_get_sync - disable low power modes
 * Synchronously disable device and/or bus low power modes; exits low power
 * mode if the controller is already in a low power state
 * @mhi_dev: Device associated with the channels
 * @vote: requested vote (bus, device or both)
 */
int mhi_device_get_sync(struct mhi_device *mhi_dev, int vote);

/**
 * mhi_device_put - re-enable low power modes
 * @mhi_dev: Device associated with the channels
 * @vote: vote to remove
 */
void mhi_device_put(struct mhi_device *mhi_dev, int vote);
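
/*
 * Voting sketch (illustrative): a client that is about to do a latency
 * sensitive burst of transfers can hold both device and bus votes around
 * the burst, then release them. The MHI_VOTE_* flags may be combined.
 *
 *	int ret;
 *
 *	ret = mhi_device_get_sync(mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
 *	if (ret)
 *		return ret;
 *
 *	// ... queue transfers, device and link stay out of low power ...
 *
 *	mhi_device_put(mhi_dev, MHI_VOTE_DEVICE | MHI_VOTE_BUS);
 */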

/**
 * mhi_prepare_for_transfer - setup channel for data transfer
 * Moves both UL and DL channels from RESET to START state
 * @mhi_dev: Device associated with the channels
 */
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);

/**
 * mhi_unprepare_from_transfer - unprepare the channels
 * Moves both UL and DL channels to RESET state
 * @mhi_dev: Device associated with the channels
 */
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);

/**
 * mhi_get_no_free_descriptors - Get transfer ring length
 * Get # of TDs available to queue buffers
 * @mhi_dev: Device associated with the channels
 * @dir: Direction of the channel
 */
int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
				enum dma_data_direction dir);

/**
 * mhi_poll - poll for any available data to consume
 * This is only applicable for the DL direction
 * @mhi_dev: Device associated with the channels
 * @budget: Maximum number of descriptors to service before returning
 */
int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
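
/*
 * Polling sketch (illustrative, assuming a NAPI based network client such
 * as an rmnet style driver): the poll handler drains up to @budget DL
 * descriptors and completes NAPI when less work than the budget was done.
 * struct my_priv and its napi/mhi_dev fields are hypothetical.
 *
 *	static int my_napi_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct my_priv *priv = container_of(napi, struct my_priv,
 *						    napi);
 *		int rx_work;
 *
 *		rx_work = mhi_poll(priv->mhi_dev, budget);
 *		if (rx_work < 0) {
 *			// channel no longer active
 *			napi_complete(napi);
 *			return 0;
 *		}
 *
 *		if (rx_work < budget)
 *			napi_complete(napi);
 *
 *		return rx_work;
 *	}
 */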

/**
 * mhi_ioctl - user space IOCTL support for MHI channels
 * Native support for setting TIOCM
 * @mhi_dev: Device associated with the channels
 * @cmd: IOCTL cmd
 * @arg: Optional parameter, ioctl cmd specific
 */
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);

/**
 * mhi_alloc_controller - Allocate mhi_controller structure
 * Allocate controller structure and additional data for controller
 * private data. You may get the private data pointer by calling
 * mhi_controller_get_devdata
 * @size: # of additional bytes to allocate
 */
struct mhi_controller *mhi_alloc_controller(size_t size);

/**
 * of_register_mhi_controller - Register MHI controller
 * Registers MHI controller with MHI bus framework. DT must be supported
 * @mhi_cntrl: MHI controller to register
 */
int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);

void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
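
/*
 * Controller registration sketch (illustrative, e.g. from a PCIe controller
 * driver's probe). The callbacks, pci_dev/of_node/mmio_base/irq_table names
 * and struct my_ctrl_priv are placeholders; exactly which fields must be
 * populated before registration depends on the bus framework implementation.
 *
 *	struct mhi_controller *mhi_cntrl;
 *	struct my_ctrl_priv *priv;
 *	int ret;
 *
 *	mhi_cntrl = mhi_alloc_controller(sizeof(*priv));
 *	if (!mhi_cntrl)
 *		return -ENOMEM;
 *
 *	priv = mhi_controller_get_devdata(mhi_cntrl);
 *
 *	mhi_cntrl->dev = &pci_dev->dev;
 *	mhi_cntrl->of_node = of_node;
 *	mhi_cntrl->regs = mmio_base;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->status_cb = my_status_cb;
 *	mhi_cntrl->link_status = my_link_status;
 *	mhi_cntrl->runtime_get = my_runtime_get;
 *	mhi_cntrl->runtime_put = my_runtime_put;
 *
 *	ret = of_register_mhi_controller(mhi_cntrl);
 *	if (ret) {
 *		mhi_free_controller(mhi_cntrl);
 *		return ret;
 *	}
 */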

/**
 * mhi_bdf_to_controller - Look up a registered controller
 * Search for controller based on device identification
 * @domain: RC domain of the device
 * @bus: Bus device connected to
 * @slot: Slot device assigned to
 * @dev_id: Device Identification
 */
struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
					     u32 dev_id);

/**
 * mhi_prepare_for_power_up - Do pre-initialization before power up
 * This is optional; call this before power up if the controller does not
 * want the bus framework to automatically free any allocated memory during
 * the shutdown process.
 * @mhi_cntrl: MHI controller
 */
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_async_power_up - Starts MHI power up sequence
 * @mhi_cntrl: MHI controller
 */
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);

/**
 * mhi_power_down - Start MHI power down sequence
 * @mhi_cntrl: MHI controller
 * @graceful: link is still accessible, so do a graceful shutdown process;
 * otherwise the host is shut down without putting the device into RESET state
 */
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);

/**
 * mhi_unprepare_after_power_down - free any allocated memory for power up
 * @mhi_cntrl: MHI controller
 */
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
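
/*
 * Power sequencing sketch (illustrative): a typical controller driver flow,
 * assuming allocations should persist across power cycles. Error handling is
 * omitted for brevity.
 *
 *	// boot the device, optionally keeping allocations across cycles
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	if (!ret)
 *		ret = mhi_sync_power_up(mhi_cntrl);
 *
 *	...
 *
 *	// shutdown: graceful if the link is still up
 *	mhi_power_down(mhi_cntrl, true);
 *	mhi_unprepare_after_power_down(mhi_cntrl);
 */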

/**
 * mhi_pm_suspend - Move MHI into a suspended state
 * Transition to MHI M3 state from M0/M1/M2 state
 * @mhi_cntrl: MHI controller
 */
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_fast_suspend - Move host into suspend state while keeping
 * the device in active state.
 * @mhi_cntrl: MHI controller
 * @notify_client: if true, clients will get a notification about the lpm
 * transition
 */
int mhi_pm_fast_suspend(struct mhi_controller *mhi_cntrl, bool notify_client);

/**
 * mhi_pm_resume - Resume MHI from suspended state
 * Transition to MHI M0 state from M3 state
 * @mhi_cntrl: MHI controller
 */
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);

/**
 * mhi_pm_fast_resume - Move host into resume state from fast suspend state
 * @mhi_cntrl: MHI controller
 * @notify_client: if true, clients will get a notification about the lpm
 * transition
 */
int mhi_pm_fast_resume(struct mhi_controller *mhi_cntrl, bool notify_client);
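
/*
 * Suspend/resume sketch (illustrative): the controller driver typically
 * drives these from its system or runtime PM callbacks after quiescing the
 * link. my_suspend()/my_resume() and the surrounding PM plumbing are
 * hypothetical.
 *
 *	static int my_suspend(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_suspend(mhi_cntrl);	// M0/M1/M2 -> M3
 *	}
 *
 *	static int my_resume(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_resume(mhi_cntrl);	// M3 -> M0
 *	}
 */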

/**
 * mhi_download_rddm_img - Download ramdump image from device for
 * debugging purposes.
 * @mhi_cntrl: MHI controller
 * @in_panic: If we are trying to capture the image while in a kernel panic
 */
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);

/**
 * mhi_force_rddm_mode - Force external device into rddm mode
 * to collect a device ramdump. This is useful if the host driver asserts
 * and we need to see the device state as well.
 * @mhi_cntrl: MHI controller
 */
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);

/**
 * mhi_get_remote_time_sync - Get external soc time relative to local soc time
 * using MMIO method.
 * @mhi_dev: Device associated with the channels
 * @t_host: Pointer to output local soc time
 * @t_dev: Pointer to output remote soc time
 */
int mhi_get_remote_time_sync(struct mhi_device *mhi_dev,
			     u64 *t_host,
			     u64 *t_dev);

/**
 * mhi_get_mhi_state - Return MHI state of device
 * @mhi_cntrl: MHI controller
 */
enum mhi_dev_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);

/**
 * mhi_set_mhi_state - Set device state
 * @mhi_cntrl: MHI controller
 * @state: state to set
 */
void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
		       enum mhi_dev_state state);

/**
 * mhi_is_active - helper function to determine if MHI is in an active state
 * @mhi_dev: client device
 */
static inline bool mhi_is_active(struct mhi_device *mhi_dev)
{
	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;

	return (mhi_cntrl->dev_state >= MHI_STATE_M0 &&
		mhi_cntrl->dev_state <= MHI_STATE_M3_FAST);
}

/**
 * mhi_control_error - MHI controller went into unrecoverable error state.
 * Will transition MHI into Linkdown state. Do not call from atomic
 * context.
 * @mhi_cntrl: MHI controller
 */
void mhi_control_error(struct mhi_controller *mhi_cntrl);

/**
 * mhi_debug_reg_dump - dump MHI registers for debug purpose
 * @mhi_cntrl: MHI controller
 */
void mhi_debug_reg_dump(struct mhi_controller *mhi_cntrl);

#ifndef CONFIG_ARCH_QCOM

#ifdef CONFIG_MHI_DEBUG

#define MHI_VERB(fmt, ...) do { \
		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
			pr_debug("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
} while (0)

#else

#define MHI_VERB(fmt, ...)

#endif

#define MHI_LOG(fmt, ...) do { \
		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
			pr_info("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
} while (0)

#define MHI_ERR(fmt, ...) do { \
		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
			pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)

#define MHI_CRITICAL(fmt, ...) do { \
		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
			pr_alert("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)

#else /* ARCH QCOM */

#include <linux/ipc_logging.h>

#ifdef CONFIG_MHI_DEBUG

#define MHI_VERB(fmt, ...) do { \
		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
			pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
		if (mhi_cntrl->log_buf && \
		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_VERBOSE)) \
			ipc_log_string(mhi_cntrl->log_buf, "[D][%s] " fmt, \
				       __func__, ##__VA_ARGS__); \
} while (0)

#else

#define MHI_VERB(fmt, ...)

#endif

#define MHI_LOG(fmt, ...) do { \
		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
			pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
		if (mhi_cntrl->log_buf && \
		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_INFO)) \
			ipc_log_string(mhi_cntrl->log_buf, "[I][%s] " fmt, \
				       __func__, ##__VA_ARGS__); \
} while (0)

#define MHI_ERR(fmt, ...) do { \
		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
			pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
		if (mhi_cntrl->log_buf && \
		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_ERROR)) \
			ipc_log_string(mhi_cntrl->log_buf, "[E][%s] " fmt, \
				       __func__, ##__VA_ARGS__); \
} while (0)

#define MHI_CRITICAL(fmt, ...) do { \
		if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
			pr_err("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
		if (mhi_cntrl->log_buf && \
		    (mhi_cntrl->log_lvl <= MHI_MSG_LVL_CRITICAL)) \
			ipc_log_string(mhi_cntrl->log_buf, "[C][%s] " fmt, \
				       __func__, ##__VA_ARGS__); \
} while (0)

#endif

#endif /* _MHI_H_ */