/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
#ifndef _MHI_H_
#define _MHI_H_
struct mhi_chan;
struct mhi_event;
struct mhi_ctxt;
struct mhi_cmd;
struct image_info;
struct bhi_vec_entry;
struct mhi_timesync;
struct mhi_buf_info;
/**
* enum MHI_CB - MHI callback
* @MHI_CB_IDLE: MHI entered idle state
* @MHI_CB_PENDING_DATA: New data available for client to process
* @MHI_CB_LPM_ENTER: MHI host entered low power mode
* @MHI_CB_LPM_EXIT: MHI host about to exit low power mode
 * @MHI_CB_EE_RDDM: MHI device entered RDDM execution environment
*/
enum MHI_CB {
MHI_CB_IDLE,
MHI_CB_PENDING_DATA,
MHI_CB_LPM_ENTER,
MHI_CB_LPM_EXIT,
MHI_CB_EE_RDDM,
};
/**
 * enum MHI_DEBUG_LEVEL - various debug levels
*/
enum MHI_DEBUG_LEVEL {
MHI_MSG_LVL_VERBOSE,
MHI_MSG_LVL_INFO,
MHI_MSG_LVL_ERROR,
MHI_MSG_LVL_CRITICAL,
MHI_MSG_LVL_MASK_ALL,
};
/**
* enum MHI_FLAGS - Transfer flags
* @MHI_EOB: End of buffer for bulk transfer
* @MHI_EOT: End of transfer
* @MHI_CHAIN: Linked transfer
*/
enum MHI_FLAGS {
MHI_EOB,
MHI_EOT,
MHI_CHAIN,
};
/**
* enum mhi_device_type - Device types
* @MHI_XFER_TYPE: Handles data transfer
 * @MHI_TIMESYNC_TYPE: Used for the timesync feature
* @MHI_CONTROLLER_TYPE: Control device
*/
enum mhi_device_type {
MHI_XFER_TYPE,
MHI_TIMESYNC_TYPE,
MHI_CONTROLLER_TYPE,
};
/**
 * enum mhi_ee - device's current execution environment
* @MHI_EE_PBL - device in PBL
* @MHI_EE_SBL - device in SBL
* @MHI_EE_AMSS - device in mission mode (firmware fully loaded)
* @MHI_EE_RDDM - device in ram dump collection mode
* @MHI_EE_WFW - device in WLAN firmware mode
* @MHI_EE_PTHRU - device in PBL but configured in pass thru mode
* @MHI_EE_EDL - device in emergency download mode
*/
enum mhi_ee {
MHI_EE_PBL = 0x0,
MHI_EE_SBL = 0x1,
MHI_EE_AMSS = 0x2,
MHI_EE_RDDM = 0x3,
MHI_EE_WFW = 0x4,
MHI_EE_PTHRU = 0x5,
MHI_EE_EDL = 0x6,
MHI_EE_MAX_SUPPORTED = MHI_EE_EDL,
MHI_EE_DISABLE_TRANSITION, /* local EE, not related to mhi spec */
MHI_EE_MAX,
};
/**
 * enum mhi_dev_state - device's current MHI state
*/
enum mhi_dev_state {
MHI_STATE_RESET = 0x0,
MHI_STATE_READY = 0x1,
MHI_STATE_M0 = 0x2,
MHI_STATE_M1 = 0x3,
MHI_STATE_M2 = 0x4,
MHI_STATE_M3 = 0x5,
MHI_STATE_BHI = 0x7,
MHI_STATE_SYS_ERR = 0xFF,
MHI_STATE_MAX,
};
/**
 * struct image_info - firmware and rddm table
 * @mhi_buf - Contains device firmware and rddm table
 * @bhi_vec - BHI vector table for the firmware/rddm image
 * @entries - # of entries in table
*/
struct image_info {
struct mhi_buf *mhi_buf;
struct bhi_vec_entry *bhi_vec;
u32 entries;
};
/**
* struct mhi_controller - Master controller structure for external modem
* @dev: Device associated with this controller
* @of_node: DT that has MHI configuration information
* @regs: Points to base of MHI MMIO register space
* @bhi: Points to base of MHI BHI register space
* @bhie: Points to base of MHI BHIe register space
* @wake_db: MHI WAKE doorbell register address
* @dev_id: PCIe device id of the external device
 * @domain: PCIe domain the device is connected to
 * @bus: PCIe bus the device is assigned to
* @slot: PCIe slot for the modem
* @iova_start: IOMMU starting address for data
* @iova_stop: IOMMU stop address for data
* @fw_image: Firmware image name for normal booting
* @edl_image: Firmware image name for emergency download mode
* @fbc_download: MHI host needs to do complete image transfer
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size
* @seg_len: BHIe vector size
* @fbc_image: Points to firmware image buffer
* @rddm_image: Points to RAM dump buffer
 * @max_chan: Maximum number of channels the controller supports
* @mhi_chan: Points to channel configuration table
* @lpm_chans: List of channels that require LPM notifications
* @total_ev_rings: Total # of event rings allocated
* @hw_ev_rings: Number of hardware event rings
* @sw_ev_rings: Number of software event rings
 * @msi_required: Number of MSI vectors required to operate
 * @msi_allocated: Number of MSI vectors allocated by the bus master
* @irq: base irq # to request
* @mhi_event: MHI event ring configurations table
* @mhi_cmd: MHI command ring configurations table
* @mhi_ctxt: MHI device context, shared memory between host and device
* @timeout_ms: Timeout in ms for state transitions
* @pm_state: Power management state
* @ee: MHI device execution environment
 * @dev_state: MHI device state
 * @status_cb: CB function to notify various power states to bus master
* @link_status: Query link status in case of abnormal value read from device
* @runtime_get: Async runtime resume function
 * @runtime_put: Release runtime votes
* @time_get: Return host time in us
* @lpm_disable: Request controller to disable link level low power modes
* @lpm_enable: Controller may enable link level low power modes again
* @priv_data: Points to bus master's private data
*/
struct mhi_controller {
struct list_head node;
struct mhi_device *mhi_dev;
/* device node for iommu ops */
struct device *dev;
struct device_node *of_node;
/* mmio base */
void __iomem *regs;
void __iomem *bhi;
void __iomem *bhie;
void __iomem *wake_db;
/* device topology */
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
/* addressing window */
dma_addr_t iova_start;
dma_addr_t iova_stop;
/* fw images */
const char *fw_image;
const char *edl_image;
/* mhi host manages downloading entire fbc images */
bool fbc_download;
size_t rddm_size;
size_t sbl_size;
size_t seg_len;
u32 session_id;
u32 sequence_id;
struct image_info *fbc_image;
struct image_info *rddm_image;
/* physical channel config data */
u32 max_chan;
struct mhi_chan *mhi_chan;
struct list_head lpm_chans; /* these chan require lpm notification */
/* physical event config data */
u32 total_ev_rings;
u32 hw_ev_rings;
u32 sw_ev_rings;
u32 msi_required;
u32 msi_allocated;
int *irq; /* interrupt table */
struct mhi_event *mhi_event;
/* cmd rings */
struct mhi_cmd *mhi_cmd;
/* mhi context (shared with device) */
struct mhi_ctxt *mhi_ctxt;
u32 timeout_ms;
/* caller should grab pm_mutex for suspend/resume operations */
struct mutex pm_mutex;
bool pre_init;
rwlock_t pm_lock;
u32 pm_state;
enum mhi_ee ee;
enum mhi_dev_state dev_state;
bool wake_set;
atomic_t dev_wake;
atomic_t alloc_size;
struct list_head transition_list;
spinlock_t transition_lock;
spinlock_t wlock;
/* debug counters */
u32 M0, M2, M3;
/* worker for different state transitions */
struct work_struct st_worker;
struct work_struct fw_worker;
struct work_struct syserr_worker;
wait_queue_head_t state_event;
/* shadow functions */
void (*status_cb)(struct mhi_controller *mhi_cntrl, void *priv,
enum MHI_CB reason);
int (*link_status)(struct mhi_controller *mhi_cntrl, void *priv);
void (*wake_get)(struct mhi_controller *mhi_cntrl, bool override);
void (*wake_put)(struct mhi_controller *mhi_cntrl, bool override);
int (*runtime_get)(struct mhi_controller *mhi_cntrl, void *priv);
void (*runtime_put)(struct mhi_controller *mhi_cntrl, void *priv);
u64 (*time_get)(struct mhi_controller *mhi_cntrl, void *priv);
void (*lpm_disable)(struct mhi_controller *mhi_cntrl, void *priv);
void (*lpm_enable)(struct mhi_controller *mhi_cntrl, void *priv);
int (*map_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
void (*unmap_single)(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf);
/* channel to control DTR messaging */
struct mhi_device *dtr_dev;
/* bounce buffer settings */
bool bounce_buf;
size_t buffer_len;
/* supports time sync feature */
bool time_sync;
struct mhi_timesync *mhi_tsync;
struct mhi_device *tsync_dev;
/* kernel log level */
enum MHI_DEBUG_LEVEL klog_lvl;
/* private log level for the controller driver to set */
enum MHI_DEBUG_LEVEL log_lvl;
/* controller specific data */
void *priv_data;
void *log_buf;
struct dentry *dentry;
struct dentry *parent;
};
/**
 * struct mhi_device - mhi device structure bound to a channel
* @dev: Device associated with the channels
 * @mtu: Maximum # of bytes the controller supports
* @ul_chan_id: MHI channel id for UL transfer
* @dl_chan_id: MHI channel id for DL transfer
* @tiocm: Device current terminal settings
* @priv: Driver private data
*/
struct mhi_device {
struct device dev;
u32 dev_id;
u32 domain;
u32 bus;
u32 slot;
size_t mtu;
int ul_chan_id;
int dl_chan_id;
int ul_event_id;
int dl_event_id;
u32 tiocm;
const struct mhi_device_id *id;
const char *chan_name;
struct mhi_controller *mhi_cntrl;
struct mhi_chan *ul_chan;
struct mhi_chan *dl_chan;
atomic_t dev_wake;
enum mhi_device_type dev_type;
void *priv_data;
int (*ul_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t len, enum MHI_FLAGS flags);
int (*dl_xfer)(struct mhi_device *mhi_dev, struct mhi_chan *mhi_chan,
void *buf, size_t size, enum MHI_FLAGS flags);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB reason);
};
/**
* struct mhi_result - Completed buffer information
* @buf_addr: Address of data buffer
* @dir: Channel direction
 * @bytes_xferd: # of bytes transferred
 * @transaction_status: Status of the last transaction
*/
struct mhi_result {
void *buf_addr;
enum dma_data_direction dir;
size_t bytes_xferd;
int transaction_status;
};
/**
* struct mhi_buf - Describes the buffer
* @buf: cpu address for the buffer
* @phys_addr: physical address of the buffer
* @dma_addr: iommu address for the buffer
* @len: # of bytes
 * @name: Buffer label; for offload channel configurations the name must be:
* ECA - Event context array data
* CCA - Channel context array data
*/
struct mhi_buf {
void *buf;
phys_addr_t phys_addr;
dma_addr_t dma_addr;
size_t len;
const char *name; /* ECA, CCA */
};
/**
* struct mhi_driver - mhi driver information
 * @id_table: NULL terminated table of channel ID names to match against
 * @probe: CB function called when a matching mhi_device is found
 * @remove: CB function called when the mhi_device is removed
* @ul_xfer_cb: UL data transfer callback
* @dl_xfer_cb: DL data transfer callback
* @status_cb: Asynchronous status callback
*/
struct mhi_driver {
const struct mhi_device_id *id_table;
int (*probe)(struct mhi_device *mhi_dev,
const struct mhi_device_id *id);
void (*remove)(struct mhi_device *mhi_dev);
void (*ul_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*dl_xfer_cb)(struct mhi_device *mhi_dev, struct mhi_result *res);
void (*status_cb)(struct mhi_device *mhi_dev, enum MHI_CB mhi_cb);
struct device_driver driver;
};
#define to_mhi_driver(drv) container_of(drv, struct mhi_driver, driver)
#define to_mhi_device(dev) container_of(dev, struct mhi_device, dev)
static inline void mhi_device_set_devdata(struct mhi_device *mhi_dev,
void *priv)
{
mhi_dev->priv_data = priv;
}
static inline void *mhi_device_get_devdata(struct mhi_device *mhi_dev)
{
return mhi_dev->priv_data;
}
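/*
 * Illustrative sketch (not part of the API): a client driver typically stores
 * its private state with mhi_device_set_devdata() in probe and fetches it
 * again from any callback; the sample_* names below are hypothetical.
 *
 *	static int sample_probe(struct mhi_device *mhi_dev,
 *				const struct mhi_device_id *id)
 *	{
 *		struct sample_client *client;
 *
 *		client = kzalloc(sizeof(*client), GFP_KERNEL);
 *		if (!client)
 *			return -ENOMEM;
 *		client->mhi_dev = mhi_dev;
 *		mhi_device_set_devdata(mhi_dev, client);
 *		return 0;
 *	}
 *
 *	static void sample_dl_cb(struct mhi_device *mhi_dev,
 *				 struct mhi_result *res)
 *	{
 *		struct sample_client *client = mhi_device_get_devdata(mhi_dev);
 *		...
 *	}
 */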
/**
* mhi_queue_transfer - Queue a buffer to hardware
 * All transfers are asynchronous transfers
* @mhi_dev: Device associated with the channels
* @dir: Data direction
* @buf: Data buffer (skb for hardware channels)
* @len: Size in bytes
* @mflags: Interrupt flags for the device
*/
static inline int mhi_queue_transfer(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
void *buf,
size_t len,
enum MHI_FLAGS mflags)
{
if (dir == DMA_TO_DEVICE)
return mhi_dev->ul_xfer(mhi_dev, mhi_dev->ul_chan, buf, len,
mflags);
else
return mhi_dev->dl_xfer(mhi_dev, mhi_dev->dl_chan, buf, len,
mflags);
}
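/*
 * Illustrative sketch (assumption, not mandated by this header): queue an
 * uplink buffer on an already prepared channel; completion is reported
 * asynchronously through the client driver's ul_xfer_cb.
 *
 *	ret = mhi_queue_transfer(mhi_dev, DMA_TO_DEVICE, buf, len, MHI_EOT);
 *	if (ret)
 *		dev_err(&mhi_dev->dev, "failed to queue buffer: %d\n", ret);
 */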
static inline void *mhi_controller_get_devdata(struct mhi_controller *mhi_cntrl)
{
return mhi_cntrl->priv_data;
}
static inline void mhi_free_controller(struct mhi_controller *mhi_cntrl)
{
kfree(mhi_cntrl);
}
/**
* mhi_driver_register - Register driver with MHI framework
* @mhi_drv: mhi_driver structure
*/
int mhi_driver_register(struct mhi_driver *mhi_drv);
/**
* mhi_driver_unregister - Unregister a driver for mhi_devices
* @mhi_drv: mhi_driver structure
*/
void mhi_driver_unregister(struct mhi_driver *mhi_drv);
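/*
 * Illustrative registration sketch: the sample_* names are hypothetical, and
 * struct mhi_device_id is assumed to carry the channel name to bind to. The
 * driver is matched against devices created by the controller for that
 * channel.
 *
 *	static const struct mhi_device_id sample_id_table[] = {
 *		{ .chan = "SAMPLE" },
 *		{},
 *	};
 *
 *	static struct mhi_driver sample_driver = {
 *		.id_table = sample_id_table,
 *		.probe = sample_probe,
 *		.remove = sample_remove,
 *		.ul_xfer_cb = sample_ul_cb,
 *		.dl_xfer_cb = sample_dl_cb,
 *		.status_cb = sample_status_cb,
 *		.driver = {
 *			.name = "sample_mhi_client",
 *		},
 *	};
 *
 *	static int __init sample_init(void)
 *	{
 *		return mhi_driver_register(&sample_driver);
 *	}
 *	module_init(sample_init);
 *
 *	static void __exit sample_exit(void)
 *	{
 *		mhi_driver_unregister(&sample_driver);
 *	}
 *	module_exit(sample_exit);
 */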
/**
* mhi_device_configure - configure ECA or CCA context
 * For offload channels that the client manages, call this
 * function to configure the channel context or event context
 * array associated with the channel
 * @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
* @mhi_buf: Configuration data
* @elements: # of configuration elements
*/
int mhi_device_configure(struct mhi_device *mhi_dev,
enum dma_data_direction dir,
struct mhi_buf *mhi_buf,
int elements);
/**
* mhi_device_get - disable all low power modes
* Only disables lpm, does not immediately exit low power mode
 * if the controller is already in a low power mode
* @mhi_dev: Device associated with the channels
*/
void mhi_device_get(struct mhi_device *mhi_dev);
/**
* mhi_device_get_sync - disable all low power modes
 * Synchronously disable all low power modes, and exit low power mode if
 * the controller is already in a low power state
* @mhi_dev: Device associated with the channels
*/
int mhi_device_get_sync(struct mhi_device *mhi_dev);
/**
* mhi_device_put - re-enable low power modes
* @mhi_dev: Device associated with the channels
*/
void mhi_device_put(struct mhi_device *mhi_dev);
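/*
 * Illustrative usage sketch: hold a low power mode vote around a latency
 * sensitive exchange, then release it so the link may re-enter low power
 * modes.
 *
 *	ret = mhi_device_get_sync(mhi_dev);
 *	if (ret)
 *		return ret;
 *	... perform time critical transfers ...
 *	mhi_device_put(mhi_dev);
 */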
/**
* mhi_prepare_for_transfer - setup channel for data transfer
* Moves both UL and DL channel from RESET to START state
* @mhi_dev: Device associated with the channels
*/
int mhi_prepare_for_transfer(struct mhi_device *mhi_dev);
/**
 * mhi_unprepare_from_transfer - Unprepare the channels
* Moves both UL and DL channels to RESET state
* @mhi_dev: Device associated with the channels
*/
void mhi_unprepare_from_transfer(struct mhi_device *mhi_dev);
/**
* mhi_get_no_free_descriptors - Get transfer ring length
 * Get # of TDs available to queue buffers
* @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
*/
int mhi_get_no_free_descriptors(struct mhi_device *mhi_dev,
enum dma_data_direction dir);
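/*
 * Illustrative sketch: after mhi_prepare_for_transfer() has moved the channels
 * to the START state, a client commonly pre-queues receive buffers until the
 * DL transfer ring is full. The buffer management below is hypothetical.
 *
 *	ret = mhi_prepare_for_transfer(mhi_dev);
 *	if (ret)
 *		return ret;
 *
 *	while (mhi_get_no_free_descriptors(mhi_dev, DMA_FROM_DEVICE) > 0) {
 *		void *buf = kmalloc(mhi_dev->mtu, GFP_KERNEL);
 *
 *		if (!buf)
 *			break;
 *		ret = mhi_queue_transfer(mhi_dev, DMA_FROM_DEVICE, buf,
 *					 mhi_dev->mtu, MHI_EOT);
 *		if (ret) {
 *			kfree(buf);
 *			break;
 *		}
 *	}
 */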
/**
* mhi_poll - poll for any available data to consume
* This is only applicable for DL direction
* @mhi_dev: Device associated with the channels
 * @budget: Number of descriptors to service before returning
*/
int mhi_poll(struct mhi_device *mhi_dev, u32 budget);
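/*
 * Illustrative sketch (assumption): a network client would typically drive
 * mhi_poll() from its NAPI poll handler, passing the NAPI budget through.
 *
 *	work_done = mhi_poll(mhi_dev, budget);
 */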
/**
* mhi_ioctl - user space IOCTL support for MHI channels
* Native support for setting TIOCM
* @mhi_dev: Device associated with the channels
* @cmd: IOCTL cmd
 * @arg: Optional parameter, ioctl cmd specific
*/
long mhi_ioctl(struct mhi_device *mhi_dev, unsigned int cmd, unsigned long arg);
/**
* mhi_alloc_controller - Allocate mhi_controller structure
 * Allocates the controller structure plus additional bytes for controller
 * private data. The private data pointer can be retrieved with
 * mhi_controller_get_devdata
* @size: # of additional bytes to allocate
*/
struct mhi_controller *mhi_alloc_controller(size_t size);
/**
* of_register_mhi_controller - Register MHI controller
 * Registers the MHI controller with the MHI bus framework. DT support is required
* @mhi_cntrl: MHI controller to register
*/
int of_register_mhi_controller(struct mhi_controller *mhi_cntrl);
void mhi_unregister_mhi_controller(struct mhi_controller *mhi_cntrl);
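/*
 * Illustrative controller bring-up sketch; pdev, mmio_base, irq_table, the
 * firmware path and the sample_* callbacks are placeholders provided by the
 * bus glue (e.g. a PCIe controller driver), not part of this header.
 *
 *	struct mhi_controller *mhi_cntrl;
 *
 *	mhi_cntrl = mhi_alloc_controller(sizeof(struct sample_priv));
 *	if (!mhi_cntrl)
 *		return -ENOMEM;
 *
 *	mhi_cntrl->dev = &pdev->dev;
 *	mhi_cntrl->of_node = pdev->dev.of_node;
 *	mhi_cntrl->regs = mmio_base;
 *	mhi_cntrl->irq = irq_table;
 *	mhi_cntrl->fw_image = "sample/firmware.mbn";
 *	mhi_cntrl->runtime_get = sample_runtime_get;
 *	mhi_cntrl->runtime_put = sample_runtime_put;
 *	mhi_cntrl->link_status = sample_link_status;
 *	mhi_cntrl->status_cb = sample_status_cb;
 *
 *	ret = of_register_mhi_controller(mhi_cntrl);
 *	if (ret)
 *		mhi_free_controller(mhi_cntrl);
 */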
/**
* mhi_bdf_to_controller - Look up a registered controller
* Search for controller based on device identification
* @domain: RC domain of the device
* @bus: Bus device connected to
* @slot: Slot device assigned to
* @dev_id: Device Identification
*/
struct mhi_controller *mhi_bdf_to_controller(u32 domain, u32 bus, u32 slot,
u32 dev_id);
/**
* mhi_prepare_for_power_up - Do pre-initialization before power up
 * This is optional; call it before power up if the controller does not
 * want the bus framework to automatically free any allocated memory during
 * the shutdown process.
* @mhi_cntrl: MHI controller
*/
int mhi_prepare_for_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_async_power_up - Starts MHI power up sequence
* @mhi_cntrl: MHI controller
*/
int mhi_async_power_up(struct mhi_controller *mhi_cntrl);
int mhi_sync_power_up(struct mhi_controller *mhi_cntrl);
/**
* mhi_power_down - Start MHI power down sequence
* @mhi_cntrl: MHI controller
 * @graceful: link is still accessible, so do a graceful shutdown; otherwise
 * the host is shut down without putting the device into RESET state
*/
void mhi_power_down(struct mhi_controller *mhi_cntrl, bool graceful);
/**
 * mhi_unprepare_after_power_down - free any memory allocated for power up
* @mhi_cntrl: MHI controller
*/
void mhi_unprepare_after_power_down(struct mhi_controller *mhi_cntrl);
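/*
 * Illustrative power lifecycle sketch for a bus master that keeps controller
 * context across power cycles:
 *
 *	ret = mhi_prepare_for_power_up(mhi_cntrl);
 *	if (ret)
 *		return ret;
 *	ret = mhi_sync_power_up(mhi_cntrl);
 *	if (ret) {
 *		mhi_unprepare_after_power_down(mhi_cntrl);
 *		return ret;
 *	}
 *	...
 *	mhi_power_down(mhi_cntrl, true);
 *	mhi_unprepare_after_power_down(mhi_cntrl);
 */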
/**
* mhi_pm_suspend - Move MHI into a suspended state
 * Transition to MHI M3 state from M0/M1/M2 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_suspend(struct mhi_controller *mhi_cntrl);
/**
* mhi_pm_resume - Resume MHI from suspended state
 * Transition to MHI M0 state from M3 state
* @mhi_cntrl: MHI controller
*/
int mhi_pm_resume(struct mhi_controller *mhi_cntrl);
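/*
 * Illustrative sketch: a PCIe bus master typically wires these into its
 * runtime PM callbacks (function names below are hypothetical).
 *
 *	static int sample_runtime_suspend(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_suspend(mhi_cntrl);
 *	}
 *
 *	static int sample_runtime_resume(struct device *dev)
 *	{
 *		struct mhi_controller *mhi_cntrl = dev_get_drvdata(dev);
 *
 *		return mhi_pm_resume(mhi_cntrl);
 *	}
 */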
/**
* mhi_download_rddm_img - Download ramdump image from device for
 * debugging purposes.
 * @mhi_cntrl: MHI controller
 * @in_panic: If we are trying to capture the image while in kernel panic
*/
int mhi_download_rddm_img(struct mhi_controller *mhi_cntrl, bool in_panic);
/**
* mhi_force_rddm_mode - Force external device into rddm mode
 * to collect device ramdump. This is useful if the host driver asserts
 * and we need to see the device state as well.
* @mhi_cntrl: MHI controller
*/
int mhi_force_rddm_mode(struct mhi_controller *mhi_cntrl);
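/*
 * Illustrative debug-path sketch (integration specific): force the device
 * into RDDM and then collect the dump; pass in_panic = true when called from
 * a panic notifier.
 *
 *	mhi_force_rddm_mode(mhi_cntrl);
 *	ret = mhi_download_rddm_img(mhi_cntrl, in_panic);
 */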
#ifndef CONFIG_ARCH_QCOM
#ifdef CONFIG_MHI_DEBUG
#define MHI_VERB(fmt, ...) do { \
	if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
		pr_debug("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
} while (0)
#else
#define MHI_VERB(fmt, ...)
#endif
#define MHI_LOG(fmt, ...) do { \
	if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
pr_info("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
} while (0)
#define MHI_ERR(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#define MHI_CRITICAL(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
pr_alert("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
} while (0)
#else /* ARCH QCOM */
#include <linux/ipc_logging.h>
#ifdef CONFIG_MHI_DEBUG
#define MHI_VERB(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_VERBOSE) \
pr_err("[D][%s] " fmt, __func__, ##__VA_ARGS__);\
if (mhi_cntrl->log_buf && \
(mhi_cntrl->log_lvl <= MHI_MSG_LVL_VERBOSE)) \
ipc_log_string(mhi_cntrl->log_buf, "[D][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#else
#define MHI_VERB(fmt, ...)
#endif
#define MHI_LOG(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_INFO) \
pr_err("[I][%s] " fmt, __func__, ##__VA_ARGS__);\
if (mhi_cntrl->log_buf && \
(mhi_cntrl->log_lvl <= MHI_MSG_LVL_INFO)) \
ipc_log_string(mhi_cntrl->log_buf, "[I][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MHI_ERR(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_ERROR) \
pr_err("[E][%s] " fmt, __func__, ##__VA_ARGS__); \
if (mhi_cntrl->log_buf && \
(mhi_cntrl->log_lvl <= MHI_MSG_LVL_ERROR)) \
ipc_log_string(mhi_cntrl->log_buf, "[E][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#define MHI_CRITICAL(fmt, ...) do { \
if (mhi_cntrl->klog_lvl <= MHI_MSG_LVL_CRITICAL) \
pr_err("[C][%s] " fmt, __func__, ##__VA_ARGS__); \
if (mhi_cntrl->log_buf && \
(mhi_cntrl->log_lvl <= MHI_MSG_LVL_CRITICAL)) \
ipc_log_string(mhi_cntrl->log_buf, "[C][%s] " fmt, \
__func__, ##__VA_ARGS__); \
} while (0)
#endif
#endif /* _MHI_H_ */