Merge "cpufreq: qcom: Add support for thermal based configuration"
diff --git a/drivers/gpu/drm/bridge/lt9611uxc.c b/drivers/gpu/drm/bridge/lt9611uxc.c
index 3a40d05..95da862 100644
--- a/drivers/gpu/drm/bridge/lt9611uxc.c
+++ b/drivers/gpu/drm/bridge/lt9611uxc.c
@@ -139,6 +139,7 @@
 };
 
 static struct lt9611_timing_info lt9611_supp_timing_cfg[] = {
+	{3840, 2160, 24, 60, 4, 2}, /* 3840x2160 24bit 60Hz 4Lane 2ports */
 	{3840, 2160, 24, 30, 4, 2}, /* 3840x2160 24bit 30Hz 4Lane 2ports */
 	{1920, 1080, 24, 60, 4, 1}, /* 1080P 24bit 60Hz 4lane 1port */
 	{1920, 1080, 24, 30, 3, 1}, /* 1080P 24bit 30Hz 3lane 1port */
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index ee6dd1b..a1c3f0f 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -1340,6 +1340,17 @@
 	  This driver can also be built as a module. If so, the module will be
 	  called as i2c-opal.
 
+config I2C_MSM_V2
+	tristate "I2C_MSM_V2"
+	depends on ARCH_QCOM
+	help
+	  If you say yes to this option, support will be included for the
+	  built-in I2C interface and its DMA engine on the MSM family
+	  processors.
+
+	  This driver can also be built as a module.  If so, the module
+	  will be called i2c-msm-v2.
+
 config I2C_ZX2967
 	tristate "ZTE ZX2967 I2C support"
 	depends on ARCH_ZX
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 18b26af..0da8471 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -115,6 +115,7 @@
 obj-$(CONFIG_I2C_XLR)		+= i2c-xlr.o
 obj-$(CONFIG_I2C_XLP9XX)	+= i2c-xlp9xx.o
 obj-$(CONFIG_I2C_RCAR)		+= i2c-rcar.o
+obj-$(CONFIG_I2C_MSM_V2)        += i2c-msm-v2.o
 obj-$(CONFIG_I2C_ZX2967)	+= i2c-zx2967.o
 
 # External I2C/SMBus adapter drivers
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
new file mode 100644
index 0000000..d6c0f45
--- /dev/null
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -0,0 +1,3039 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2014-2019, 2020, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * I2C controller driver for Qualcomm Technologies Inc platforms
+ */
+
+#define pr_fmt(fmt) "#%d " fmt "\n", __LINE__
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/mutex.h>
+#include <linux/timer.h>
+#include <linux/time.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/dma-mapping.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/msm-sps.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/i2c-msm-v2.h>
+
+#ifdef DEBUG
+static const enum msm_i2_debug_level DEFAULT_DBG_LVL = MSM_DBG;
+#else
+static const enum msm_i2_debug_level DEFAULT_DBG_LVL = MSM_ERR;
+#endif
+
+/* Forward declarations */
+static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl);
+static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
+						struct completion *complete);
+static int  i2c_msm_pm_resume(struct device *dev);
+static void i2c_msm_pm_suspend(struct device *dev);
+static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl);
+static struct pinctrl_state *
+	i2c_msm_rsrcs_gpio_get_state(struct i2c_msm_ctrl *ctrl,
+					const char *name);
+static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
+						bool runtime_active);
+
+/* string table for enum i2c_msm_xfer_mode_id */
+const char * const i2c_msm_mode_str_tbl[] = {
+	"FIFO", "BLOCK", "DMA", "None",
+};
+
+static const u32 i2c_msm_fifo_block_sz_tbl[] = {16, 16, 32, 0};
+
+/* from enum i2c_msm_xfer_mode_id to qup_io_modes register values */
+static const u32 i2c_msm_mode_to_reg_tbl[] = {
+	0x0, /* map I2C_MSM_XFER_MODE_FIFO -> binary 00 */
+	0x1, /* map I2C_MSM_XFER_MODE_BLOCK -> binary 01 */
+	0x3  /* map I2C_MSM_XFER_MODE_DMA -> binary 11 */
+};
+
+const char *i2c_msm_err_str_table[] = {
+	[I2C_MSM_NO_ERR]     = "NONE",
+	[I2C_MSM_ERR_NACK]   = "NACK: slave not responding, ensure its powered",
+	[I2C_MSM_ERR_ARB_LOST] = "ARB_LOST",
+	[I2C_MSM_ERR_BUS_ERR] = "BUS ERROR:noisy bus/unexpected start/stop tag",
+	[I2C_MSM_ERR_TIMEOUT]  = "TIMEOUT_ERROR",
+	[I2C_MSM_ERR_CORE_CLK] = "CLOCK OFF: Check Core Clock",
+	[I2C_MSM_ERR_OVR_UNDR_RUN] = "OVER_UNDER_RUN_ERROR",
+};
+
+/*
+ * i2c_msm_dbg_dump_diag: dump transfer details and QUP status on error
+ *
+ * @use_param_vals when true use the caller-supplied status/qup_op values,
+ *                 when false read fresh values from the QUP registers
+ * @status value of QUP_I2C_STATUS (only used when use_param_vals is true)
+ * @qup_op value of QUP_OPERATIONAL (only used when use_param_vals is true)
+ */
+static void i2c_msm_dbg_dump_diag(struct i2c_msm_ctrl *ctrl,
+				bool use_param_vals, u32 status, u32 qup_op)
+{
+	struct i2c_msm_xfer *xfer = &ctrl->xfer;
+	const char *str = i2c_msm_err_str_table[xfer->err];
+	char buf[I2C_MSM_REG_2_STR_BUF_SZ];
+
+	if (!use_param_vals) {
+		void __iomem        *base = ctrl->rsrcs.base;
+
+		status = readl_relaxed(base + QUP_I2C_STATUS);
+		qup_op = readl_relaxed(base + QUP_OPERATIONAL);
+	}
+
+	if (xfer->err == I2C_MSM_ERR_TIMEOUT) {
+		/*
+		 * if we are not the bus master or SDA/SCL is low then it may be
+		 * that slave is pulling the lines low. Otherwise it is likely a
+		 * GPIO issue
+		 */
+		if (!(status & QUP_BUS_MASTER))
+			snprintf(buf, I2C_MSM_REG_2_STR_BUF_SZ,
+				"%s(val:%dmsec) misconfigured GPIO or slave pulling bus line(s) low\n",
+				str, jiffies_to_msecs(xfer->timeout));
+		else
+			snprintf(buf, I2C_MSM_REG_2_STR_BUF_SZ,
+			"%s(val:%dmsec)", str, jiffies_to_msecs(xfer->timeout));
+
+		/* from here on report the formatted timeout string instead */
+		str = buf;
+	}
+
+	/* dump xfer details */
+	dev_err(ctrl->dev,
+		"%s: msgs(n:%d cur:%d %s) bc(rx:%zu tx:%zu) mode:%s slv_addr:0x%0x MSTR_STS:0x%08x OPER:0x%08x\n",
+		str, xfer->msg_cnt, xfer->cur_buf.msg_idx,
+		xfer->cur_buf.is_rx ? "rx" : "tx", xfer->rx_cnt, xfer->tx_cnt,
+		i2c_msm_mode_str_tbl[xfer->mode_id], xfer->msgs->addr,
+		status, qup_op);
+}
+
+/* output block size: decoded from the low two bits of QUP_IO_MODES */
+static u32 i2c_msm_reg_io_modes_out_blk_sz(u32 qup_io_modes)
+{
+	u32 idx = qup_io_modes & 0x3;
+
+	return i2c_msm_fifo_block_sz_tbl[idx];
+}
+
+/* input block size: decoded from two bits at offset 5 of QUP_IO_MODES */
+static u32 i2c_msm_reg_io_modes_in_blk_sz(u32 qup_io_modes)
+{
+	u32 idx = BITS_AT(qup_io_modes, 5, 2);
+
+	return i2c_msm_fifo_block_sz_tbl[idx];
+}
+
+static const u32 i2c_msm_fifo_sz_table[] = {2, 4, 8, 16};
+
+/*
+ * i2c_msm_qup_fifo_calc_size: compute input/output FIFO sizes from HW
+ *
+ * FIFO size = block size * number of blocks, both decoded from the
+ * QUP_IO_MODES register. Results are cached in ctrl->xfer.fifo.
+ */
+static void i2c_msm_qup_fifo_calc_size(struct i2c_msm_ctrl *ctrl)
+{
+	u32 reg_data, output_fifo_size, input_fifo_size;
+	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
+
+	/* Guard to read fifo size only once. It is hard wired and never changes */
+	if (fifo->input_fifo_sz && fifo->output_fifo_sz)
+		return;
+
+	reg_data = readl_relaxed(ctrl->rsrcs.base + QUP_IO_MODES);
+	output_fifo_size  = BITS_AT(reg_data, 2, 2);
+	input_fifo_size   = BITS_AT(reg_data, 7, 2);
+
+	fifo->input_fifo_sz = i2c_msm_reg_io_modes_in_blk_sz(reg_data) *
+					i2c_msm_fifo_sz_table[input_fifo_size];
+	fifo->output_fifo_sz = i2c_msm_reg_io_modes_out_blk_sz(reg_data) *
+					i2c_msm_fifo_sz_table[output_fifo_size];
+
+	/* fix: second field used to print "input-sz" for the output size */
+	i2c_msm_dbg(ctrl, MSM_PROF, "QUP input-sz:%zu, output-sz:%zu\n",
+			fifo->input_fifo_sz, fifo->output_fifo_sz);
+
+}
+
+/*
+ * i2c_msm_tag_byte: accessor for the n'th byte of a tag value
+ */
+static u8 *i2c_msm_tag_byte(struct i2c_msm_tag *tag, int byte_n)
+{
+	u8 *first_byte = (u8 *) tag;
+
+	return first_byte + byte_n;
+}
+
+/*
+ * i2c_msm_buf_to_ptr: translates a xfer buf to a pointer into the i2c_msg data
+ */
+static u8 *i2c_msm_buf_to_ptr(struct i2c_msm_xfer_buf *buf)
+{
+	struct i2c_msm_xfer *xfer =
+				container_of(buf, struct i2c_msm_xfer, cur_buf);
+	struct i2c_msg *cur_msg = &xfer->msgs[buf->msg_idx];
+
+	return &cur_msg->buf[buf->byte_idx];
+}
+
+/*
+ * tag_lookup_table[is_new_addr][is_last][is_rx]
+ * @is_new_addr Is start tag required? (which requires two more bytes.)
+ * @is_last     Use the XXXXX_N_STOP tag variant
+ * @is_rx       READ/WRITE
+ */
+static const struct i2c_msm_tag tag_lookup_table[2][2][2] = {
+	{{{QUP_TAG2_DATA_WRITE,					2},
+	   {QUP_TAG2_DATA_READ,					2} },
+	/* last buffer */
+	  {{QUP_TAG2_DATA_WRITE_N_STOP,				2},
+	   {QUP_TAG2_DATA_READ_N_STOP,				2} } },
+	/* new addr */
+	 {{{QUP_TAG2_START | (QUP_TAG2_DATA_WRITE           << 16), 4},
+	   {QUP_TAG2_START | (QUP_TAG2_DATA_READ            << 16), 4} },
+	/* last buffer + new addr */
+	  {{QUP_TAG2_START | (QUP_TAG2_DATA_WRITE_N_STOP    << 16), 4},
+	   {QUP_TAG2_START | (QUP_TAG2_DATA_READ_N_STOP     << 16), 4} } },
+};
+
+/*
+ * i2c_msm_tag_create: format a qup tag ver2
+ *
+ * Copies the constant tag template from tag_lookup_table, then patches in
+ * the per-transfer fields: slave address (4-byte tags only) and buffer len.
+ */
+static struct i2c_msm_tag i2c_msm_tag_create(bool is_new_addr, bool is_last_buf,
+					bool is_rx, u8 buf_len, u8 slave_addr)
+{
+	struct i2c_msm_tag tag;
+	int new_addr = is_new_addr ? 1 : 0;
+	int last_buf = is_last_buf ? 1 : 0;
+	int rx       = is_rx ? 1 : 0;
+
+	tag = tag_lookup_table[new_addr][last_buf][rx];
+
+	/* fill in the non-const values: the address and the length */
+	if (tag.len == I2C_MSM_TAG2_MAX_LEN) {
+		*i2c_msm_tag_byte(&tag, 1) = slave_addr;
+		*i2c_msm_tag_byte(&tag, 3) = buf_len;
+	} else {
+		*i2c_msm_tag_byte(&tag, 1) = buf_len;
+	}
+
+	return tag;
+}
+
+/*
+ * i2c_msm_qup_state_wait_valid: poll QUP_STATE until the core state is valid
+ *
+ * @state      requested state to wait for (don't care when only_valid set)
+ * @only_valid when true, wait only for the STATE_VALID bit; which state the
+ *             core is actually in is not checked
+ * @return zero on success, -ETIMEDOUT when no valid state is reached within
+ *         roughly 10-15 msec (usually means core_clk is off)
+ */
+static int
+i2c_msm_qup_state_wait_valid(struct i2c_msm_ctrl *ctrl,
+			enum i2c_msm_qup_state state, bool only_valid)
+{
+	u32 status;
+	void __iomem  *base     = ctrl->rsrcs.base;
+	int ret      = 0;
+	int read_cnt = 0;
+
+	do {
+		status = readl_relaxed(base + QUP_STATE);
+		++read_cnt;
+
+		/*
+		 * If only valid bit needs to be checked, requested state is
+		 * 'don't care'
+		 */
+		if (status & QUP_STATE_VALID) {
+			if (only_valid)
+				goto poll_valid_end;
+			else if ((state & QUP_I2C_MAST_GEN) &&
+					(status & QUP_I2C_MAST_GEN))
+				goto poll_valid_end;
+			else if ((status & QUP_STATE_MASK) == state)
+				goto poll_valid_end;
+		}
+
+		/*
+		 * Sleeping for 1-1.5 ms for every 100 iterations and break if
+		 * iterations crosses the 1500 marks allows roughly 10-15 msec
+		 * of time to get the core to valid state.
+		 */
+		if (!(read_cnt % 100))
+			usleep_range(1000, 1500);
+	} while (read_cnt <= 1500);
+
+	ret = -ETIMEDOUT;
+	dev_err(ctrl->dev,
+		"error timeout on polling for valid state. check core_clk\n");
+
+poll_valid_end:
+	if (!only_valid)
+		i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_VALID_END,
+				/* aggregate ret and state */
+				(((-ret) & 0xff) | ((state & 0xf) << 16)),
+				read_cnt, status);
+
+	return ret;
+}
+
+/*
+ * i2c_msm_qup_state_set: request a new QUP core state
+ *
+ * Waits until the current state is valid, writes the new state, then waits
+ * for the core to acknowledge it.
+ * @return zero on success, -EIO when either wait times out
+ */
+static int i2c_msm_qup_state_set(struct i2c_msm_ctrl *ctrl,
+						enum i2c_msm_qup_state state)
+{
+	if (i2c_msm_qup_state_wait_valid(ctrl, 0, true))
+		return -EIO;
+
+	writel_relaxed(state, ctrl->rsrcs.base + QUP_STATE);
+
+	if (i2c_msm_qup_state_wait_valid(ctrl, state, false))
+		return -EIO;
+
+	return 0;
+}
+
+/*
+ * i2c_msm_qup_sw_reset: issue a QUP software reset and wait for reset state
+ *
+ * @return zero on success; on timeout flags a core-clock error (when a
+ *         transfer is active) since SW reset requires a running core_clk
+ */
+static int i2c_msm_qup_sw_reset(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	writel_relaxed(1, ctrl->rsrcs.base + QUP_SW_RESET);
+	/*
+	 * Ensure that QUP that reset state is written before waiting for a the
+	 * reset state to be valid.
+	 */
+	wmb();
+	ret = i2c_msm_qup_state_wait_valid(ctrl, QUP_STATE_RESET, false);
+	if (ret) {
+		if (atomic_read(&ctrl->xfer.is_active))
+			ctrl->xfer.err = I2C_MSM_ERR_CORE_CLK;
+		dev_err(ctrl->dev, "error on issuing QUP software-reset\n");
+	}
+	return ret;
+}
+
+/*
+ * i2c_msm_qup_xfer_init_reset_state: setup QUP registers for the next run state
+ * @pre QUP must be in reset state.
+ * @pre xfer->mode_id is set to the chosen transfer state
+ * @post update values in QUP_MX_*_COUNT, QUP_CONFIG, QUP_IO_MODES,
+ *       and QUP_OPERATIONAL_MASK registers
+ */
+static void
+i2c_msm_qup_xfer_init_reset_state(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer *xfer = &ctrl->xfer;
+	void __iomem * const base = ctrl->rsrcs.base;
+	u32  mx_rd_cnt     = 0;
+	u32  mx_wr_cnt     = 0;
+	u32  mx_in_cnt     = 0;
+	u32  mx_out_cnt    = 0;
+	u32  no_input      = 0;
+	/* NOTE(review): no_output is never set non-zero here; per the comment
+	 * in i2c_msm_blk_xfer tx_cnt > 0 always - confirm that is intended.
+	 */
+	u32  no_output     = 0;
+	u32  input_mode    = i2c_msm_mode_to_reg_tbl[xfer->mode_id] << 12;
+	u32  output_mode   = i2c_msm_mode_to_reg_tbl[xfer->mode_id] << 10;
+	u32  config_reg;
+	u32  io_modes_reg;
+	u32  op_mask;
+	u32  rx_cnt = 0;
+	u32  tx_cnt = 0;
+	/*
+	 * DMA mode:
+	 * 1. QUP_MX_*_COUNT must be zero in all cases.
+	 * 2. both QUP_NO_INPUT and QUP_NO_OUTPUT are unset.
+	 * FIFO mode:
+	 * 1. QUP_MX_INPUT_COUNT and QUP_MX_OUTPUT_COUNT are zero
+	 * 2. QUP_MX_READ_COUNT and QUP_MX_WRITE_COUNT reflect true count
+	 * 3. QUP_NO_INPUT and QUP_NO_OUTPUT are set according to counts
+	 */
+	if (xfer->mode_id != I2C_MSM_XFER_MODE_DMA) {
+		rx_cnt   = xfer->rx_cnt + xfer->rx_ovrhd_cnt;
+		tx_cnt   = xfer->tx_cnt + xfer->tx_ovrhd_cnt;
+		no_input = rx_cnt  ? 0 : QUP_NO_INPUT;
+
+		switch (xfer->mode_id) {
+		case I2C_MSM_XFER_MODE_FIFO:
+			mx_rd_cnt  = rx_cnt;
+			mx_wr_cnt  = tx_cnt;
+			break;
+		case I2C_MSM_XFER_MODE_BLOCK:
+			mx_in_cnt  = rx_cnt;
+			mx_out_cnt = tx_cnt;
+			break;
+		default:
+			break;
+		}
+	}
+
+	/* init DMA/BLOCK modes counter */
+	writel_relaxed(mx_in_cnt,  base + QUP_MX_INPUT_COUNT);
+	writel_relaxed(mx_out_cnt, base + QUP_MX_OUTPUT_COUNT);
+
+	/* init FIFO mode counter */
+	writel_relaxed(mx_rd_cnt, base + QUP_MX_READ_COUNT);
+	writel_relaxed(mx_wr_cnt, base + QUP_MX_WRITE_COUNT);
+
+	/*
+	 * Set QUP mini-core to I2C tags ver-2
+	 * sets NO_INPUT / NO_OUTPUT as needed
+	 */
+	config_reg = readl_relaxed(base + QUP_CONFIG);
+	config_reg &=
+	      ~(QUP_NO_INPUT | QUP_NO_OUTPUT | QUP_N_MASK | QUP_MINI_CORE_MASK);
+	config_reg |= (no_input | no_output | QUP_N_VAL |
+							QUP_MINI_CORE_I2C_VAL);
+	writel_relaxed(config_reg, base + QUP_CONFIG);
+
+	/*
+	 * Turns-on packing/unpacking
+	 * sets NO_INPUT / NO_OUTPUT as needed
+	 */
+	io_modes_reg = readl_relaxed(base + QUP_IO_MODES);
+	io_modes_reg &=
+	   ~(QUP_INPUT_MODE | QUP_OUTPUT_MODE | QUP_PACK_EN | QUP_UNPACK_EN
+	     | QUP_OUTPUT_BIT_SHIFT_EN);
+	io_modes_reg |=
+	   (input_mode | output_mode | QUP_PACK_EN | QUP_UNPACK_EN);
+	writel_relaxed(io_modes_reg, base + QUP_IO_MODES);
+
+	/*
+	 * mask INPUT and OUTPUT service flags in to prevent IRQs on FIFO status
+	 * change on DMA-mode transfers
+	 */
+	op_mask = (xfer->mode_id == I2C_MSM_XFER_MODE_DMA) ?
+		    (QUP_INPUT_SERVICE_MASK | QUP_OUTPUT_SERVICE_MASK) : 0;
+	writel_relaxed(op_mask, base + QUP_OPERATIONAL_MASK);
+	/* Ensure that QUP configuration is written before leaving this func */
+	wmb();
+}
+
+/*
+ * i2c_msm_clk_div_fld: clock divider settings for one output frequency
+ * @clk_freq_out output clock frequency
+ * @fs_div fs divider value
+ * @ht_div high time divider value
+ */
+struct i2c_msm_clk_div_fld {
+	u32                clk_freq_out;
+	u8                 fs_div;
+	u8                 ht_div;
+};
+
+/*
+ * divider values as provided by the HW designers, one entry per supported
+ * standard bus frequency (100KHz / 400KHz / 1MHz)
+ */
+static struct i2c_msm_clk_div_fld i2c_msm_clk_div_map[] = {
+	{KHz(100), 124, 62},
+	{KHz(400),  28, 14},
+	{KHz(1000),  8,  5},
+};
+
+/*
+ * i2c_msm_set_mstr_clk_ctl: cache the value for QUP_I2C_MASTER_CLK_CTL
+ *
+ * @return zero on success, -EINVAL when the bus frequency is non-standard
+ *         and no divider was supplied via device tree
+ * @fs_div when zero use value from table above, otherwise use given value
+ * @ht_div when zero use value from table above, otherwise use given value
+ *
+ * Format the value to be configured into the clock divider register. This
+ * register is configured every time core is moved from reset to run state.
+ */
+static int i2c_msm_set_mstr_clk_ctl(struct i2c_msm_ctrl *ctrl, int fs_div,
+			int ht_div, int noise_rjct_scl, int noise_rjct_sda)
+{
+	int ret = 0;
+	int i;
+	u32 reg_val = 0;
+	struct i2c_msm_clk_div_fld *itr = i2c_msm_clk_div_map;
+
+	/* set noise rejection values for scl and sda */
+	reg_val = I2C_MSM_SCL_NOISE_REJECTION(reg_val, noise_rjct_scl);
+	reg_val = I2C_MSM_SDA_NOISE_REJECTION(reg_val, noise_rjct_sda);
+
+	/*
+	 * find matching freq and set divider values unless they are forced
+	 * from parameter list
+	 */
+	for (i = 0; i < ARRAY_SIZE(i2c_msm_clk_div_map); ++i, ++itr) {
+		if (ctrl->rsrcs.clk_freq_out == itr->clk_freq_out) {
+			if (!fs_div)
+				fs_div = itr->fs_div;
+			if (!ht_div)
+				ht_div = itr->ht_div;
+			break;
+		}
+	}
+
+	/* For non-standard clock freq, clk divider value
+	 * fs_div should be supplied by client through device tree
+	 */
+	if (!fs_div) {
+		dev_err(ctrl->dev, "Missing clk divider value in DT for %dKHz\n",
+			(ctrl->rsrcs.clk_freq_out / 1000));
+		return -EINVAL;
+	}
+
+	/* format values in clk-ctl cache */
+	ctrl->mstr_clk_ctl = (reg_val & (~0xff07ff)) | ((ht_div & 0xff) << 16)
+							|(fs_div & 0xff);
+
+	return ret;
+}
+
+/*
+ * i2c_msm_qup_xfer_init_run_state: set qup regs which must be set *after* reset
+ *
+ * Writes the cached master-clock-control value (formatted earlier by
+ * i2c_msm_set_mstr_clk_ctl()) and optionally dumps registers at MSM_DBG.
+ */
+static void i2c_msm_qup_xfer_init_run_state(struct i2c_msm_ctrl *ctrl)
+{
+	void __iomem *base = ctrl->rsrcs.base;
+
+	writel_relaxed(ctrl->mstr_clk_ctl, base + QUP_I2C_MASTER_CLK_CTL);
+
+	/* Ensure that QUP configuration is written before leaving this func */
+	wmb();
+
+	if (ctrl->dbgfs.dbg_lvl == MSM_DBG) {
+		dev_info(ctrl->dev,
+			"QUP state after programming for next transfers\n");
+		i2c_msm_dbg_qup_reg_dump(ctrl);
+	}
+}
+
+/* push one 32-bit word into the output FIFO */
+static void i2c_msm_fifo_wr_word(struct i2c_msm_ctrl *ctrl, u32 data)
+{
+	void __iomem *base = ctrl->rsrcs.base;
+
+	writel_relaxed(data, base + QUP_OUT_FIFO_BASE);
+	i2c_msm_dbg(ctrl, MSM_DBG, "OUT-FIFO:0x%08x\n", data);
+}
+
+/* pop one 32-bit word from the input FIFO; optionally store it via @data */
+static u32 i2c_msm_fifo_rd_word(struct i2c_msm_ctrl *ctrl, u32 *data)
+{
+	u32 word = readl_relaxed(ctrl->rsrcs.base + QUP_IN_FIFO_BASE);
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "IN-FIFO :0x%08x\n", word);
+
+	if (data)
+		*data = word;
+
+	return word;
+}
+
+/*
+ * i2c_msm_fifo_wr_buf_flush: push the partially filled out_buf word to HW
+ */
+static void i2c_msm_fifo_wr_buf_flush(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
+	u32 *word_ptr = (u32 *) fifo->out_buf;
+
+	/* nothing staged => nothing to flush */
+	if (!fifo->out_buf_idx)
+		return;
+
+	i2c_msm_fifo_wr_word(ctrl, *word_ptr);
+	fifo->out_buf_idx = 0;
+	*word_ptr = 0;
+}
+
+/*
+ * i2c_msm_fifo_wr_buf: stage bytes and write them to the FIFO word by word
+ *
+ * @len buf size (in bytes)
+ * @return number of bytes from buf which have been processed (written to
+ *         FIFO or kept in out buffer and will be written later)
+ */
+static size_t
+i2c_msm_fifo_wr_buf(struct i2c_msm_ctrl *ctrl, u8 *buf, size_t len)
+{
+	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
+	int cnt;
+
+	for (cnt = 0; cnt < len; ++cnt, ++buf) {
+		fifo->out_buf[fifo->out_buf_idx++] = *buf;
+
+		/* a complete word has been staged => push it to HW */
+		if (fifo->out_buf_idx == 4) {
+			u32 *word_ptr = (u32 *) fifo->out_buf;
+
+			i2c_msm_fifo_wr_word(ctrl, *word_ptr);
+			fifo->out_buf_idx = 0;
+			*word_ptr = 0;
+		}
+	}
+	return cnt;
+}
+
+/*
+ * i2c_msm_fifo_xfer_wr_tag: write the current buffer's out-tag to the FIFO
+ *
+ * @return number of tag bytes processed. On full success the out_tag is
+ *         zeroed so it will not be written twice.
+ */
+static size_t i2c_msm_fifo_xfer_wr_tag(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
+	size_t len = 0;
+
+	if (ctrl->dbgfs.dbg_lvl >= MSM_DBG) {
+		char str[I2C_MSM_REG_2_STR_BUF_SZ];
+
+		dev_info(ctrl->dev, "tag.val:0x%llx tag.len:%d %s\n",
+			buf->out_tag.val, buf->out_tag.len,
+			i2c_msm_dbg_tag_to_str(&buf->out_tag, str,
+								sizeof(str)));
+	}
+
+	if (buf->out_tag.len) {
+		len = i2c_msm_fifo_wr_buf(ctrl, (u8 *) &buf->out_tag.val,
+							buf->out_tag.len);
+
+		/* partial write: report it, keep the tag for a retry */
+		if (len < buf->out_tag.len)
+			goto done;
+
+		buf->out_tag = (struct i2c_msm_tag) {0};
+	}
+done:
+	return len;
+}
+
+/*
+ * i2c_msm_fifo_read: reads up to fifo size into user's buf
+ *
+ * Drains input-FIFO words, first consuming the echoed input tag bytes and
+ * then copying payload bytes into the client's i2c_msg buffer.
+ */
+static void i2c_msm_fifo_read_xfer_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
+	struct i2c_msg          *msg = ctrl->xfer.msgs + buf->msg_idx;
+	u8 *p_tag_val   = (u8 *) &buf->in_tag.val;
+	int buf_need_bc = msg->len - buf->byte_idx;
+	u8  word[4];
+	int copy_bc;
+	int word_idx;
+	int word_bc;
+
+	/* nothing to drain for tx buffers */
+	if (!buf->is_rx)
+		return;
+
+	while (buf_need_bc || buf->in_tag.len) {
+		i2c_msm_fifo_rd_word(ctrl, (u32 *) word);
+		word_bc  = sizeof(word);
+		word_idx = 0;
+
+		/*
+		 * copy bytes from fifo word to tag.
+		 * @note buf->in_tag.len (max 2bytes) < word_bc (4bytes)
+		 */
+		if (buf->in_tag.len) {
+			copy_bc = min_t(int, word_bc, buf->in_tag.len);
+
+			/*
+			 * NOTE(review): destination is offset by the
+			 * *remaining* in_tag.len rather than by the bytes
+			 * already consumed - confirm against the tag layout.
+			 */
+			memcpy(p_tag_val + buf->in_tag.len, word, copy_bc);
+
+			word_idx        += copy_bc;
+			word_bc         -= copy_bc;
+			buf->in_tag.len -= copy_bc;
+
+			if ((ctrl->dbgfs.dbg_lvl >= MSM_DBG) &&
+							!buf->in_tag.len) {
+				char str[64];
+
+				dev_info(ctrl->dev, "%s\n",
+					i2c_msm_dbg_tag_to_str(&buf->in_tag,
+							str, sizeof(str)));
+			}
+		}
+
+		/* copy bytes from fifo word to user's buffer */
+		copy_bc = min_t(int, word_bc, buf_need_bc);
+		memcpy(msg->buf + buf->byte_idx, word + word_idx, copy_bc);
+
+		buf->byte_idx += copy_bc;
+		buf_need_bc   -= copy_bc;
+	}
+}
+
+/*
+ * i2c_msm_fifo_write_xfer_buf: write xfer.cur_buf (user's-buf + tag) to fifo
+ */
+static void i2c_msm_fifo_write_xfer_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf  = &ctrl->xfer.cur_buf;
+	size_t len;
+	size_t tag_len;
+
+	tag_len = buf->out_tag.len;
+	len = i2c_msm_fifo_xfer_wr_tag(ctrl);
+	if (len < tag_len) {
+		dev_err(ctrl->dev, "error on writing tag to out FIFO\n");
+		return;
+	}
+
+	if (!buf->is_rx) {
+		if (ctrl->dbgfs.dbg_lvl >= MSM_DBG) {
+			char str[I2C_MSM_REG_2_STR_BUF_SZ];
+			int  offset = 0;
+			u8  *p      = i2c_msm_buf_to_ptr(buf);
+			int  i;
+
+			/*
+			 * NOTE(review): this dump iterates `len` (tag bytes
+			 * written) rather than buf->len (payload size) -
+			 * looks unintended; confirm.
+			 */
+			for (i = 0 ; i < len; ++i, ++p)
+				offset += scnprintf(str + offset,
+						   sizeof(str) - offset,
+						   "0x%x ", *p);
+			dev_info(ctrl->dev, "data: %s\n", str);
+		}
+
+		len = i2c_msm_fifo_wr_buf(ctrl, i2c_msm_buf_to_ptr(buf),
+						buf->len);
+		if (len < buf->len)
+			dev_err(ctrl->dev, "error on xfering buf with FIFO\n");
+	}
+}
+
+/*
+ * i2c_msm_fifo_xfer_process:
+ *
+ * @pre    transfer size is less then or equal to fifo size.
+ * @pre    QUP in run state/pause
+ * @return zero on success
+ */
+static int i2c_msm_fifo_xfer_process(struct i2c_msm_ctrl *ctrl)
+{
+	/* snapshot of cur_buf so the buffer iteration can run twice */
+	struct i2c_msm_xfer_buf first_buf = ctrl->xfer.cur_buf;
+	int ret;
+
+	/* load fifo while in pause state to avoid race conditions */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
+	if (ret < 0)
+		return ret;
+
+	/* write all that goes to output fifo */
+	while (i2c_msm_xfer_next_buf(ctrl))
+		i2c_msm_fifo_write_xfer_buf(ctrl);
+
+	i2c_msm_fifo_wr_buf_flush(ctrl);
+
+	/* rewind to the first buffer for the read pass below */
+	ctrl->xfer.cur_buf = first_buf;
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		return ret;
+
+	/* wait for input done interrupt */
+	ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
+	if (ret < 0)
+		return ret;
+
+	/* read all from input fifo */
+	while (i2c_msm_xfer_next_buf(ctrl))
+		i2c_msm_fifo_read_xfer_buf(ctrl);
+
+	return 0;
+}
+
+/*
+ * i2c_msm_fifo_xfer: process transfer using fifo mode
+ *
+ * Resets the core, programs reset-state and run-state registers, then
+ * hands over to i2c_msm_fifo_xfer_process().
+ */
+static int i2c_msm_fifo_xfer(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "Starting FIFO transfer\n");
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
+	if (ret < 0)
+		return ret;
+
+	/* registers that may only be programmed while in reset state */
+	i2c_msm_qup_xfer_init_reset_state(ctrl);
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		return ret;
+
+	/* registers that may only be programmed while in run state */
+	i2c_msm_qup_xfer_init_run_state(ctrl);
+
+	return i2c_msm_fifo_xfer_process(ctrl);
+}
+
+/*
+ * i2c_msm_blk_init_struct: Allocate memory and initialize blk structure
+ *
+ * Reads the HW block sizes from QUP_IO_MODES and allocates one cached
+ * block per direction. On success blk->is_init is set.
+ *
+ * @return 0 on success or error code
+ */
+static int i2c_msm_blk_init_struct(struct i2c_msm_ctrl *ctrl)
+{
+	u32 reg_data = readl_relaxed(ctrl->rsrcs.base + QUP_IO_MODES);
+	int ret;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+
+	/* fix: were comma-terminated expressions; now plain statements */
+	blk->in_blk_sz  = i2c_msm_reg_io_modes_in_blk_sz(reg_data);
+	blk->out_blk_sz = i2c_msm_reg_io_modes_out_blk_sz(reg_data);
+
+	blk->tx_cache = kmalloc(blk->out_blk_sz, GFP_KERNEL);
+	if (!blk->tx_cache) {
+		ret = -ENOMEM;
+		goto out_buf_err;
+	}
+
+	blk->rx_cache = kmalloc(blk->in_blk_sz, GFP_KERNEL);
+	/* fix: previously re-tested tx_cache, so a failed rx_cache
+	 * allocation went undetected and dereferenced NULL later
+	 */
+	if (!blk->rx_cache) {
+		ret = -ENOMEM;
+		goto in_buf_err;
+	}
+
+	blk->is_init = true;
+	return 0;
+
+in_buf_err:
+	kfree(blk->tx_cache);
+out_buf_err:
+
+	return ret;
+}
+
+/*
+ * i2c_msm_blk_wr_flush: flushes internal cached block to FIFO
+ *
+ * Waits for a free output block (interrupt driven), then loads the cached
+ * bytes into the output FIFO while the core is paused.
+ *
+ * @return 0 on success or error code
+ */
+static int i2c_msm_blk_wr_flush(struct i2c_msm_ctrl *ctrl)
+{
+	int byte_num;
+	int ret = 0;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	u32 *buf_u32_ptr;
+
+	/* nothing cached => nothing to flush */
+	if (!blk->tx_cache_idx)
+		return 0;
+
+	/* if no blocks available wait for interrupt */
+	ret = i2c_msm_xfer_wait_for_completion(ctrl, &blk->wait_tx_blk);
+	if (ret)
+		return ret;
+
+	/*
+	 * pause the controller until we finish loading the block in order to
+	 * avoid race conditions
+	 */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
+	if (ret < 0)
+		return ret;
+	i2c_msm_dbg(ctrl, MSM_DBG, "OUT-BLK:%*phC\n", blk->tx_cache_idx,
+							blk->tx_cache);
+
+	/* write the cache to HW one 32-bit word at a time, zeroing as we go */
+	for (byte_num = 0; byte_num < blk->tx_cache_idx;
+						byte_num += sizeof(u32)) {
+		buf_u32_ptr = (u32 *) (blk->tx_cache + byte_num);
+		writel_relaxed(*buf_u32_ptr,
+					ctrl->rsrcs.base + QUP_OUT_FIFO_BASE);
+		*buf_u32_ptr = 0;
+	}
+
+	/* now cache is empty */
+	blk->tx_cache_idx = 0;
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		return ret;
+
+	return ret;
+}
+
+/*
+ * i2c_msm_blk_wr_buf: stage bytes into the tx block cache, flushing when full
+ *
+ * @len buf size (in bytes)
+ * @return number of bytes from buf which have been processed (written to
+ *         FIFO or kept in out buffer and will be written later)
+ */
+static int
+i2c_msm_blk_wr_buf(struct i2c_msm_ctrl *ctrl, const u8 *buf, int len)
+{
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	int cnt;
+	int ret = 0;
+
+	for (cnt = 0; cnt < len; ++cnt, ++buf) {
+		blk->tx_cache[blk->tx_cache_idx++] = *buf;
+
+		/* flush cached buffer to HW FIFO when full */
+		if (blk->tx_cache_idx == blk->out_blk_sz) {
+			ret = i2c_msm_blk_wr_flush(ctrl);
+			if (ret)
+				return ret;
+		}
+	}
+	return cnt;
+}
+
+/*
+ * i2c_msm_blk_xfer_wr_tag: buffered writing the tag of current buf
+ * @return zero on success
+ */
+static int i2c_msm_blk_xfer_wr_tag(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf = &ctrl->xfer.cur_buf;
+	int written;
+
+	if (!buf->out_tag.len)
+		return 0;
+
+	written = i2c_msm_blk_wr_buf(ctrl, (u8 *) &buf->out_tag.val,
+							buf->out_tag.len);
+	if (written != buf->out_tag.len)
+		return -EFAULT;
+
+	/* tag fully queued => clear it so it is not written twice */
+	buf->out_tag = (struct i2c_msm_tag) {0};
+	return 0;
+}
+
+/*
+ * i2c_msm_blk_wr_xfer_buf: writes ctrl->xfer.cur_buf to HW
+ *
+ * Queues the buffer's out-tag followed by its payload to the block cache.
+ *
+ * @return zero on success
+ */
+static int i2c_msm_blk_wr_xfer_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *buf  = &ctrl->xfer.cur_buf;
+	int len;
+	int ret;
+
+	ret = i2c_msm_blk_xfer_wr_tag(ctrl);
+	if (ret)
+		return ret;
+
+	len = i2c_msm_blk_wr_buf(ctrl, i2c_msm_buf_to_ptr(buf), buf->len);
+	if (len < buf->len)
+		return -EFAULT;
+
+	/* advance past the payload just queued */
+	buf->byte_idx += len;
+	return 0;
+}
+
+/*
+ * i2c_msm_blk_rd_blk: read a block from HW FIFO to internal cache
+ *
+ * @return number of bytes read or negative error value
+ * @need_bc number of bytes that we need
+ *
+ * uses internal counter to keep track of number of available blocks. When
+ * zero, waits for interrupt.
+ */
+static int i2c_msm_blk_rd_blk(struct i2c_msm_ctrl *ctrl, int need_bc)
+{
+	int byte_num;
+	int ret = 0;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	u32 *cache_ptr = (u32 *) blk->rx_cache;
+	int read_bc    = min_t(int, blk->in_blk_sz, need_bc);
+
+	/* wait for block available interrupt */
+	ret = i2c_msm_xfer_wait_for_completion(ctrl, &blk->wait_rx_blk);
+	if (ret)
+		return ret;
+
+	/* Read block from HW to cache; only the first read_bc bytes of the
+	 * block are actually fetched from the FIFO
+	 */
+	for (byte_num = 0; byte_num < blk->in_blk_sz;
+					byte_num += sizeof(u32)) {
+		if (byte_num < read_bc) {
+			*cache_ptr = readl_relaxed(ctrl->rsrcs.base +
+							QUP_IN_FIFO_BASE);
+			++cache_ptr;
+		}
+	}
+	/* fresh block => consumer restarts at the beginning of the cache */
+	blk->rx_cache_idx = 0;
+	return read_bc;
+}
+
+/*
+ * i2c_msm_blk_rd_xfer_buf: fill in ctrl->xfer.cur_buf from HW
+ *
+ * Queues the buffer's out-tag, flushes it to HW, then drains input blocks:
+ * the echoed input tag bytes are discarded and the payload is copied into
+ * the client's i2c_msg buffer.
+ *
+ * @return zero on success
+ */
+static int i2c_msm_blk_rd_xfer_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	struct i2c_msm_xfer_buf *buf      = &ctrl->xfer.cur_buf;
+	struct i2c_msg *msg               = ctrl->xfer.msgs + buf->msg_idx;
+	int    copy_bc;         /* number of bytes to copy to user's buffer */
+	int    cache_avail_bc;
+	int    ret = 0;
+
+	/* write tag to out FIFO */
+	ret = i2c_msm_blk_xfer_wr_tag(ctrl);
+	if (ret)
+		return ret;
+
+	/* fix: a failed flush (e.g. tx-block timeout) was silently ignored */
+	ret = i2c_msm_blk_wr_flush(ctrl);
+	if (ret)
+		return ret;
+
+	while (buf->len || buf->in_tag.len) {
+		cache_avail_bc = i2c_msm_blk_rd_blk(ctrl,
+						buf->len + buf->in_tag.len);
+
+		i2c_msm_dbg(ctrl, MSM_DBG, "IN-BLK:%*phC\n", cache_avail_bc,
+					blk->rx_cache + blk->rx_cache_idx);
+
+		if (cache_avail_bc < 0)
+			return cache_avail_bc;
+
+		/* discard tag from input FIFO */
+		if (buf->in_tag.len) {
+			int discard_bc = min_t(int, cache_avail_bc,
+							buf->in_tag.len);
+			blk->rx_cache_idx += discard_bc;
+			buf->in_tag.len   -= discard_bc;
+			cache_avail_bc    -= discard_bc;
+		}
+
+		/* copy bytes from cached block to user's buffer */
+		copy_bc = min_t(int, cache_avail_bc, buf->len);
+		memcpy(msg->buf + buf->byte_idx,
+			blk->rx_cache + blk->rx_cache_idx, copy_bc);
+
+		blk->rx_cache_idx += copy_bc;
+		buf->len          -= copy_bc;
+		buf->byte_idx     += copy_bc;
+	}
+	return ret;
+}
+
+/*
+ * i2c_msm_blk_xfer: process transfer using block mode
+ *
+ * Lazily allocates the block caches, programs the core, then walks the
+ * transfer buffers, reading or writing each one block at a time.
+ */
+static int i2c_msm_blk_xfer(struct i2c_msm_ctrl *ctrl)
+{
+	int ret = 0;
+	struct i2c_msm_xfer_buf      *buf = &ctrl->xfer.cur_buf;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+
+	if (!blk->is_init) {
+		ret = i2c_msm_blk_init_struct(ctrl);
+		if (!blk->is_init)
+			return ret;
+	}
+
+	init_completion(&blk->wait_rx_blk);
+	init_completion(&blk->wait_tx_blk);
+
+	/* tx_cnt > 0 always */
+	blk->complete_mask = QUP_MAX_OUTPUT_DONE_FLAG;
+	if (ctrl->xfer.rx_cnt)
+		blk->complete_mask |= QUP_MAX_INPUT_DONE_FLAG;
+
+	/* initialize block mode for new transfer */
+	blk->tx_cache_idx = 0;
+	blk->rx_cache_idx = 0;
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
+	if (ret < 0)
+		return ret;
+
+	/* program qup registers */
+	i2c_msm_qup_xfer_init_reset_state(ctrl);
+
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret < 0)
+		return ret;
+
+	/* program qup registers which must be set *after* reset */
+	i2c_msm_qup_xfer_init_run_state(ctrl);
+
+	while (i2c_msm_xfer_next_buf(ctrl)) {
+		if (buf->is_rx) {
+			ret = i2c_msm_blk_rd_xfer_buf(ctrl);
+			if (ret)
+				return ret;
+			/*
+			 * SW workaround to wait for extra interrupt from
+			 * hardware for last block in block mode for read
+			 */
+			if (buf->is_last) {
+				ret = i2c_msm_xfer_wait_for_completion(ctrl,
+							&blk->wait_rx_blk);
+				if (!ret)
+					complete(&ctrl->xfer.complete);
+			}
+		} else {
+			ret = i2c_msm_blk_wr_xfer_buf(ctrl);
+			if (ret)
+				return ret;
+		}
+	}
+	/* push out any partially filled tx block before waiting for done */
+	i2c_msm_blk_wr_flush(ctrl);
+	return i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
+}
+
+/*
+ * i2c_msm_dma_xfer_prepare: map DMA buffers, and create tags.
+ *
+ * Walks the transfer buffers, DMA-maps each client message fragment,
+ * stores its tag in the pre-allocated tag array, and fills dma->buf_arr.
+ * Descriptor counts per channel are accumulated and bounds-checked.
+ *
+ * @return zero on success or negative error value
+ */
+static int i2c_msm_dma_xfer_prepare(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_dma *dma  = &ctrl->xfer.dma;
+	struct i2c_msm_xfer_buf      *buf  = &ctrl->xfer.cur_buf;
+	struct i2c_msm_dma_chan      *tx = &dma->chan[I2C_MSM_DMA_TX];
+	struct i2c_msm_dma_chan      *rx = &dma->chan[I2C_MSM_DMA_RX];
+	struct i2c_msm_dma_buf *dma_buf;
+	int                     rem_buf_cnt = I2C_MSM_DMA_DESC_ARR_SIZ;
+	struct i2c_msg         *cur_msg;
+	enum dma_data_direction buf_dma_dirctn;
+	struct i2c_msm_dma_mem  data;
+	u8        *tag_arr_itr_vrtl_addr;
+	dma_addr_t tag_arr_itr_phy_addr;
+
+	tx->desc_cnt_cur    = 0;
+	rx->desc_cnt_cur    = 0;
+	dma->buf_arr_cnt      = 0;
+	dma_buf               = dma->buf_arr;
+	tag_arr_itr_vrtl_addr = ((u8 *) dma->tag_arr.vrtl_addr);
+	tag_arr_itr_phy_addr  = dma->tag_arr.phy_addr;
+
+	/* one tag-array slot per buffer; both iterators advance together */
+	for (; i2c_msm_xfer_next_buf(ctrl) && rem_buf_cnt;
+		++dma_buf,
+		tag_arr_itr_phy_addr  += sizeof(dma_addr_t),
+		tag_arr_itr_vrtl_addr += sizeof(dma_addr_t)) {
+
+		/* dma-map the client's message */
+		cur_msg        = ctrl->xfer.msgs + buf->msg_idx;
+		data.vrtl_addr = cur_msg->buf + buf->byte_idx;
+		if (buf->is_rx) {
+			buf_dma_dirctn  = DMA_FROM_DEVICE;
+			rx->desc_cnt_cur += 2; /* msg + tag */
+			tx->desc_cnt_cur += 1; /* tag */
+		} else {
+			buf_dma_dirctn  = DMA_TO_DEVICE;
+			tx->desc_cnt_cur += 2; /* msg + tag */
+		}
+
+		/* for last buffer in a transfer msg */
+		if (buf->is_last) {
+			/* add ovrhead byte cnt for tags specific to DMA mode */
+			ctrl->xfer.rx_ovrhd_cnt += 2; /* EOT+FLUSH_STOP tags*/
+			ctrl->xfer.tx_ovrhd_cnt += 2; /* EOT+FLUSH_STOP tags */
+
+			/* increment rx desc cnt to read off tags and
+			 * increment tx desc cnt to queue EOT+FLUSH_STOP tags
+			 */
+			tx->desc_cnt_cur++;
+			rx->desc_cnt_cur++;
+		}
+
+		/* bail out before mapping when descriptors would overflow */
+		if ((rx->desc_cnt_cur >= I2C_MSM_DMA_RX_SZ) ||
+		    (tx->desc_cnt_cur >= I2C_MSM_DMA_TX_SZ))
+			return -ENOMEM;
+
+		data.phy_addr = dma_map_single(ctrl->dev, data.vrtl_addr,
+						buf->len, buf_dma_dirctn);
+
+		if (dma_mapping_error(ctrl->dev, data.phy_addr)) {
+			dev_err(ctrl->dev,
+			  "error DMA mapping DMA buffers, err:%lld buf_vrtl:0x%pK data_len:%d dma_dir:%s\n",
+			  (u64) data.phy_addr, data.vrtl_addr, buf->len,
+			  ((buf_dma_dirctn == DMA_FROM_DEVICE)
+				? "DMA_FROM_DEVICE" : "DMA_TO_DEVICE"));
+			return -EFAULT;
+		}
+
+		/* copy 8 bytes. Only tag.len bytes will be used */
+		*((u64 *)tag_arr_itr_vrtl_addr) =  buf->out_tag.val;
+
+		i2c_msm_dbg(ctrl, MSM_DBG,
+			"vrtl:0x%pK phy:0x%llx val:0x%llx sizeof(dma_addr_t):%zu\n",
+			tag_arr_itr_vrtl_addr, (u64) tag_arr_itr_phy_addr,
+			*((u64 *)tag_arr_itr_vrtl_addr), sizeof(dma_addr_t));
+
+		/*
+		 * create dma buf, in the dma buf arr, based on the buf created
+		 * by i2c_msm_xfer_next_buf()
+		 */
+		*dma_buf = (struct i2c_msm_dma_buf) {
+			.ptr      = data,
+			.len      = buf->len,
+			.dma_dir  = buf_dma_dirctn,
+			.is_rx    = buf->is_rx,
+			.is_last  = buf->is_last,
+			.tag      = (struct i2c_msm_dma_tag) {
+				.buf = tag_arr_itr_phy_addr,
+				.len = buf->out_tag.len,
+			},
+		};
+		++dma->buf_arr_cnt;
+		--rem_buf_cnt;
+	}
+	return 0;
+}
+
+/*
+ * i2c_msm_dma_xfer_unprepare: DAM unmap buffers.
+ */
+static void i2c_msm_dma_xfer_unprepare(struct i2c_msm_ctrl *ctrl)
+{
+	int i;
+	struct i2c_msm_dma_buf *buf_itr = ctrl->xfer.dma.buf_arr;
+
+	for (i = 0 ; i < ctrl->xfer.dma.buf_arr_cnt ; ++i, ++buf_itr)
+		dma_unmap_single(ctrl->dev, buf_itr->ptr.phy_addr, buf_itr->len,
+							buf_itr->dma_dir);
+}
+
/* dmaengine completion callback for the TX channel; signals the main
 * transfer completion waited on by i2c_msm_dma_xfer_process()
 */
static void i2c_msm_dma_callback_tx_complete(void *dma_async_param)
{
	struct i2c_msm_ctrl *ctrl = dma_async_param;

	complete(&ctrl->xfer.complete);
}
+
/* dmaengine completion callback for the RX channel; signals the separate
 * rx completion waited on after the tx one in i2c_msm_dma_xfer_process()
 */
static void i2c_msm_dma_callback_rx_complete(void *dma_async_param)
{
	struct i2c_msm_ctrl *ctrl = dma_async_param;

	complete(&ctrl->xfer.rx_complete);
}
+
+/*
+ * i2c_msm_dma_xfer_process: Queue transfers to DMA
+ * @pre 1)QUP is in run state. 2) i2c_msm_dma_xfer_prepare() was called.
+ * @return zero on success or negative error value
+ */
+static int i2c_msm_dma_xfer_process(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
+	struct i2c_msm_dma_chan *tx       = &dma->chan[I2C_MSM_DMA_TX];
+	struct i2c_msm_dma_chan *rx       = &dma->chan[I2C_MSM_DMA_RX];
+	struct scatterlist *sg_rx         = NULL;
+	struct scatterlist *sg_rx_itr     = NULL;
+	struct scatterlist *sg_tx         = NULL;
+	struct scatterlist *sg_tx_itr     = NULL;
+	struct dma_async_tx_descriptor     *dma_desc_rx;
+	struct dma_async_tx_descriptor     *dma_desc_tx;
+	struct i2c_msm_dma_buf             *buf_itr;
+	int  i;
+	int  ret = 0;
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "Going to enqueue %zu buffers in DMA\n",
+							dma->buf_arr_cnt);
+
+	/* Set the QUP State to pause while DMA completes the txn */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_PAUSE);
+	if (ret) {
+		dev_err(ctrl->dev, "transition to pause state failed before DMA transaction :%d\n",
+									ret);
+		return ret;
+	}
+
+	sg_tx = kcalloc(tx->desc_cnt_cur, sizeof(struct scatterlist),
+								GFP_KERNEL);
+	if (!sg_tx) {
+		ret = -ENOMEM;
+		goto dma_xfer_end;
+	}
+	sg_init_table(sg_tx, tx->desc_cnt_cur);
+	sg_tx_itr = sg_tx;
+
+	sg_rx = kcalloc(rx->desc_cnt_cur, sizeof(struct scatterlist),
+								GFP_KERNEL);
+	if (!sg_rx) {
+		ret = -ENOMEM;
+		goto dma_xfer_end;
+	}
+	sg_init_table(sg_rx, rx->desc_cnt_cur);
+	sg_rx_itr = sg_rx;
+
+	buf_itr = dma->buf_arr;
+
+	for (i = 0; i < dma->buf_arr_cnt ; ++i, ++buf_itr) {
+		/* Queue tag */
+		sg_dma_address(sg_tx_itr) = buf_itr->tag.buf;
+		sg_dma_len(sg_tx_itr) = buf_itr->tag.len;
+		++sg_tx_itr;
+
+		/* read off tag + len bytes(don't care) in input FIFO
+		 * on read transfer
+		 */
+		if (buf_itr->is_rx) {
+			/* rid of input tag */
+			sg_dma_address(sg_rx_itr) =
+					ctrl->xfer.dma.input_tag.phy_addr;
+			sg_dma_len(sg_rx_itr)     = QUP_BUF_OVERHD_BC;
+			++sg_rx_itr;
+
+			/* queue data buffer */
+			sg_dma_address(sg_rx_itr) = buf_itr->ptr.phy_addr;
+			sg_dma_len(sg_rx_itr)     = buf_itr->len;
+			++sg_rx_itr;
+		} else {
+			sg_dma_address(sg_tx_itr) = buf_itr->ptr.phy_addr;
+			sg_dma_len(sg_tx_itr)     = buf_itr->len;
+			++sg_tx_itr;
+		}
+	}
+
+	/* this tag will be copied to rx fifo */
+	sg_dma_address(sg_tx_itr) = dma->eot_n_flush_stop_tags.phy_addr;
+	sg_dma_len(sg_tx_itr)     = QUP_BUF_OVERHD_BC;
+	++sg_tx_itr;
+
+	/*
+	 * Reading the tag off the input fifo has side effects and
+	 * it is mandatory for getting the DMA's interrupt.
+	 */
+	sg_dma_address(sg_rx_itr) = ctrl->xfer.dma.input_tag.phy_addr;
+	sg_dma_len(sg_rx_itr)     = QUP_BUF_OVERHD_BC;
+	++sg_rx_itr;
+
+	/*
+	 * We only want a single BAM interrupt per transfer, and we always
+	 * add a flush-stop i2c tag as the last tx sg entry. Since the dma
+	 * driver puts the supplied BAM flags only on the last BAM descriptor,
+	 * the flush stop will always be the one which generate that interrupt
+	 * and invokes the callback.
+	 */
+	dma_desc_tx = dmaengine_prep_slave_sg(tx->dma_chan,
+						sg_tx,
+						sg_tx_itr - sg_tx,
+						tx->dir,
+						(SPS_IOVEC_FLAG_EOT |
+							SPS_IOVEC_FLAG_NWD));
+	if (IS_ERR_OR_NULL(dma_desc_tx)) {
+		dev_err(ctrl->dev, "error dmaengine_prep_slave_sg tx:%ld\n",
+							PTR_ERR(dma_desc_tx));
+		ret = dma_desc_tx ? PTR_ERR(dma_desc_tx) : -ENOMEM;
+		goto dma_xfer_end;
+	}
+
+	/* callback defined for tx dma desc */
+	dma_desc_tx->callback       = i2c_msm_dma_callback_tx_complete;
+	dma_desc_tx->callback_param = ctrl;
+	dmaengine_submit(dma_desc_tx);
+	dma_async_issue_pending(tx->dma_chan);
+
+	/* queue the rx dma desc */
+	dma_desc_rx = dmaengine_prep_slave_sg(rx->dma_chan, sg_rx,
+					sg_rx_itr - sg_rx, rx->dir,
+					(SPS_IOVEC_FLAG_EOT |
+							SPS_IOVEC_FLAG_NWD));
+	if (IS_ERR_OR_NULL(dma_desc_rx)) {
+		dev_err(ctrl->dev,
+			"error dmaengine_prep_slave_sg rx:%ld\n",
+						PTR_ERR(dma_desc_rx));
+		ret = dma_desc_rx ? PTR_ERR(dma_desc_rx) : -ENOMEM;
+		goto dma_xfer_end;
+	}
+
+	dma_desc_rx->callback       = i2c_msm_dma_callback_rx_complete;
+	dma_desc_rx->callback_param = ctrl;
+	dmaengine_submit(dma_desc_rx);
+	dma_async_issue_pending(rx->dma_chan);
+
+	/* Set the QUP State to Run when completes the txn */
+	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
+	if (ret) {
+		dev_err(ctrl->dev, "transition to run state failed before DMA transaction :%d\n",
+									ret);
+		goto dma_xfer_end;
+	}
+
+	ret = i2c_msm_xfer_wait_for_completion(ctrl, &ctrl->xfer.complete);
+	if (!ret && ctrl->xfer.rx_cnt)
+		ret = i2c_msm_xfer_wait_for_completion(ctrl,
+						&ctrl->xfer.rx_complete);
+
+dma_xfer_end:
+	/* free scatter-gather lists */
+	kfree(sg_tx);
+	kfree(sg_rx);
+
+	return ret;
+}
+
+static void i2c_msm_dma_free_channels(struct i2c_msm_ctrl *ctrl)
+{
+	int i;
+
+	for (i = 0; i < I2C_MSM_DMA_CNT; ++i) {
+		struct i2c_msm_dma_chan *chan = &ctrl->xfer.dma.chan[i];
+
+		if (!chan->is_init)
+			continue;
+
+		dma_release_channel(chan->dma_chan);
+		chan->is_init  = false;
+		chan->dma_chan = NULL;
+	}
+	if (ctrl->xfer.dma.state > I2C_MSM_DMA_INIT_CORE)
+		ctrl->xfer.dma.state = I2C_MSM_DMA_INIT_CORE;
+}
+
/* dmaengine channel names, indexed by I2C_MSM_DMA_TX / I2C_MSM_DMA_RX */
static const char * const i2c_msm_dma_chan_name[] = {"tx", "rx"};

/* dmaengine transfer directions, same index order as the names above */
static int i2c_msm_dmaengine_dir[] = {
	DMA_MEM_TO_DEV, DMA_DEV_TO_MEM
};
+
/*
 * i2c_msm_dma_init_channels: request and configure the tx/rx dma channels.
 *
 * Idempotent: channels that are already initialized are skipped. On any
 * failure all channels acquired so far are released again.
 *
 * @return zero on success or negative error value
 */
static int i2c_msm_dma_init_channels(struct i2c_msm_ctrl *ctrl)
{
	int ret = 0;
	int i;

	/* Iterate over the dma channels to initialize them */
	for (i = 0; i < I2C_MSM_DMA_CNT; ++i) {
		struct dma_slave_config cfg = {0};
		struct i2c_msm_dma_chan *chan = &ctrl->xfer.dma.chan[i];

		if (chan->is_init)
			continue;

		chan->name     = i2c_msm_dma_chan_name[i];
		chan->dma_chan = dma_request_slave_channel(ctrl->dev,
								chan->name);
		if (!chan->dma_chan) {
			dev_err(ctrl->dev,
				"error dma_request_slave_channel(dev:%s chan:%s)\n",
				dev_name(ctrl->dev), chan->name);
			/* free the channels if allocated before */
			i2c_msm_dma_free_channels(ctrl);
			return -ENODEV;
		}

		chan->dir = cfg.direction = i2c_msm_dmaengine_dir[i];
		ret = dmaengine_slave_config(chan->dma_chan, &cfg);
		if (ret) {
			dev_err(ctrl->dev,
			"error:%d dmaengine_slave_config(chan:%s)\n",
						ret, chan->name);
			/* release this channel here; is_init is still false
			 * so i2c_msm_dma_free_channels() will skip it
			 */
			dma_release_channel(chan->dma_chan);
			chan->dma_chan = NULL;
			i2c_msm_dma_free_channels(ctrl);
			return ret;
		}
		chan->is_init = true;
	}
	ctrl->xfer.dma.state = I2C_MSM_DMA_INIT_CHAN;
	return 0;
}
+
/*
 * i2c_msm_dma_teardown: release DMA channels and the coherent tag memory,
 * leaving the dma state machine at I2C_MSM_DMA_INIT_NONE.
 */
static void i2c_msm_dma_teardown(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;

	i2c_msm_dma_free_channels(ctrl);

	/* tag memory was allocated by i2c_msm_dma_init() (state > NONE);
	 * input_tag.vrtl_addr is the base of the whole allocation
	 */
	if (dma->state > I2C_MSM_DMA_INIT_NONE)
		dma_free_coherent(ctrl->dev, I2C_MSM_DMA_TAG_MEM_SZ,
				  dma->input_tag.vrtl_addr,
				  dma->input_tag.phy_addr);

	dma->state = I2C_MSM_DMA_INIT_NONE;
}
+
/*
 * i2c_msm_dma_init: one-time DMA core init (coherent tag memory), then
 * channel init.
 *
 * The single coherent allocation is carved into three regions:
 * 1) input (throw-away) tag, 2) constant EOT+FLUSH_STOP tag,
 * 3) per-buffer output tag array.
 *
 * @return zero on success or negative error value
 */
static int i2c_msm_dma_init(struct i2c_msm_ctrl *ctrl)
{
	struct i2c_msm_xfer_mode_dma *dma = &ctrl->xfer.dma;
	u8             *tags_space_virt_addr;
	dma_addr_t      tags_space_phy_addr;

	/* check if DMA core is initialized */
	if (dma->state > I2C_MSM_DMA_INIT_NONE)
		goto dma_core_is_init;

	/*
	 * allocate dma memory for input_tag + eot_n_flush_stop_tags + tag_arr
	 * for more see: I2C_MSM_DMA_TAG_MEM_SZ definition
	 */
	tags_space_virt_addr = dma_alloc_coherent(
						ctrl->dev,
						I2C_MSM_DMA_TAG_MEM_SZ,
						&tags_space_phy_addr,
						GFP_KERNEL);
	if (!tags_space_virt_addr) {
		dev_err(ctrl->dev,
		  "error alloc %d bytes of DMAable memory for DMA tags space\n",
		  I2C_MSM_DMA_TAG_MEM_SZ);
		return -ENOMEM;
	}

	/*
	 * set the dma-tags virtual and physical addresses:
	 * 1) the first tag space is for the input (throw away) tag
	 */
	dma->input_tag.vrtl_addr  = tags_space_virt_addr;
	dma->input_tag.phy_addr   = tags_space_phy_addr;

	/* 2) second tag space is for eot_flush_stop tag which is const value */
	tags_space_virt_addr += I2C_MSM_TAG2_MAX_LEN;
	tags_space_phy_addr  += I2C_MSM_TAG2_MAX_LEN;
	dma->eot_n_flush_stop_tags.vrtl_addr = tags_space_virt_addr;
	dma->eot_n_flush_stop_tags.phy_addr  = tags_space_phy_addr;

	/* set eot_n_flush_stop_tags value */
	*((u16 *) dma->eot_n_flush_stop_tags.vrtl_addr) =
				QUP_TAG2_INPUT_EOT | (QUP_TAG2_FLUSH_STOP << 8);

	/* 3) all other tag spaces are used for transfer tags */
	tags_space_virt_addr  += I2C_MSM_TAG2_MAX_LEN;
	tags_space_phy_addr   += I2C_MSM_TAG2_MAX_LEN;
	dma->tag_arr.vrtl_addr = tags_space_virt_addr;
	dma->tag_arr.phy_addr  = tags_space_phy_addr;

	dma->state = I2C_MSM_DMA_INIT_CORE;

dma_core_is_init:
	return i2c_msm_dma_init_channels(ctrl);
}
+
/*
 * i2c_msm_dma_xfer: entry point for a transfer in DMA mode.
 *
 * Initializes DMA (if needed), maps the client buffers, programs the QUP
 * (reset-state registers first, then run-state registers), queues the
 * descriptors and waits for completion. Buffers are always unmapped on
 * exit, success or failure.
 *
 * @return zero on success or negative error value
 */
static int i2c_msm_dma_xfer(struct i2c_msm_ctrl *ctrl)
{
	int ret;

	ret = i2c_msm_dma_init(ctrl);
	if (ret) {
		dev_err(ctrl->dev, "DMA Init Failed: %d\n", ret);
		return ret;
	}

	/* dma map user's buffers and create tags */
	ret = i2c_msm_dma_xfer_prepare(ctrl);
	if (ret < 0) {
		dev_err(ctrl->dev, "error on i2c_msm_dma_xfer_prepare():%d\n",
									ret);
		goto err_dma_xfer;
	}

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
	if (ret < 0)
		goto err_dma_xfer;

	/* program qup registers */
	i2c_msm_qup_xfer_init_reset_state(ctrl);

	ret = i2c_msm_qup_state_set(ctrl, QUP_STATE_RUN);
	if (ret < 0)
		goto err_dma_xfer;

	/* program qup registers which must be set *after* reset */
	i2c_msm_qup_xfer_init_run_state(ctrl);

	/* enqueue transfer buffers */
	ret = i2c_msm_dma_xfer_process(ctrl);
	if (ret)
		dev_err(ctrl->dev,
			"error i2c_msm_dma_xfer_process(n_bufs:%zu):%d\n",
			ctrl->xfer.dma.buf_arr_cnt, ret);

err_dma_xfer:
	/* unmaps only the buffers counted in dma.buf_arr_cnt, so this is
	 * safe even when prepare failed part-way through
	 */
	i2c_msm_dma_xfer_unprepare(ctrl);
	return ret;
}
+
+/*
+ * i2c_msm_qup_slv_holds_bus: true when slave hold the SDA low
+ */
+static bool i2c_msm_qup_slv_holds_bus(struct i2c_msm_ctrl *ctrl)
+{
+	u32 status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
+
+	bool slv_holds_bus =	!(status & QUP_I2C_SDA) &&
+				(status & QUP_BUS_ACTIVE) &&
+				!(status & QUP_BUS_MASTER);
+	if (slv_holds_bus)
+		dev_info(ctrl->dev,
+			"bus lines held low by a slave detected\n");
+
+	return slv_holds_bus;
+}
+
+/*
+ * i2c_msm_qup_poll_bus_active_unset: poll until QUP_BUS_ACTIVE is unset
+ *
+ * @return zero when bus inactive, or nonzero on timeout.
+ *
+ * Loop and reads QUP_I2C_MASTER_STATUS until bus is inactive or timeout
+ * reached. Used to avoid race condition due to gap between QUP completion
+ * interrupt and QUP issuing stop signal on the bus.
+ */
+static int i2c_msm_qup_poll_bus_active_unset(struct i2c_msm_ctrl *ctrl)
+{
+	void __iomem *base    = ctrl->rsrcs.base;
+	ulong timeout = jiffies + msecs_to_jiffies(I2C_MSM_MAX_POLL_MSEC);
+	int    ret      = 0;
+	size_t read_cnt = 0;
+
+	do {
+		if (!(readl_relaxed(base + QUP_I2C_STATUS) & QUP_BUS_ACTIVE))
+			goto poll_active_end;
+		++read_cnt;
+	} while (time_before_eq(jiffies, timeout));
+
+	ret = -EBUSY;
+
+poll_active_end:
+	/* second logged value is time-left before timeout or zero if expired */
+	i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_ACTV_END,
+				ret, (ret ? 0 : (timeout - jiffies)), read_cnt);
+
+	return ret;
+}
+
+static void i2c_msm_clk_path_vote(struct i2c_msm_ctrl *ctrl)
+{
+	i2c_msm_clk_path_init(ctrl);
+
+	if (ctrl->rsrcs.clk_path_vote.client_hdl)
+		msm_bus_scale_client_update_request(
+					ctrl->rsrcs.clk_path_vote.client_hdl,
+					I2C_MSM_CLK_PATH_RESUME_VEC);
+}
+
+static void i2c_msm_clk_path_unvote(struct i2c_msm_ctrl *ctrl)
+{
+	if (ctrl->rsrcs.clk_path_vote.client_hdl)
+		msm_bus_scale_client_update_request(
+					ctrl->rsrcs.clk_path_vote.client_hdl,
+					I2C_MSM_CLK_PATH_SUSPEND_VEC);
+}
+
+static void i2c_msm_clk_path_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	if (ctrl->rsrcs.clk_path_vote.client_hdl) {
+		msm_bus_scale_unregister_client(
+					ctrl->rsrcs.clk_path_vote.client_hdl);
+		ctrl->rsrcs.clk_path_vote.client_hdl = 0;
+	}
+}
+
+/*
+ * i2c_msm_clk_path_init_structs: internal impl detail of i2c_msm_clk_path_init
+ *
+ * allocates and initilizes the bus scaling vectors.
+ */
+static int i2c_msm_clk_path_init_structs(struct i2c_msm_ctrl *ctrl)
+{
+	struct msm_bus_vectors *paths    = NULL;
+	struct msm_bus_paths   *usecases = NULL;
+
+	i2c_msm_dbg(ctrl, MSM_PROF, "initializes path clock voting structs\n");
+
+	paths = kzalloc(sizeof(*paths) * 2, GFP_KERNEL);
+	if (!paths)
+		return -ENOMEM;
+
+	usecases = kzalloc(sizeof(*usecases) * 2, GFP_KERNEL);
+	if (!usecases)
+		goto path_init_err;
+
+	ctrl->rsrcs.clk_path_vote.pdata = kzalloc(
+				       sizeof(*ctrl->rsrcs.clk_path_vote.pdata),
+				       GFP_KERNEL);
+	if (!ctrl->rsrcs.clk_path_vote.pdata)
+		goto path_init_err;
+
+	paths[I2C_MSM_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_vectors) {
+		.src = ctrl->rsrcs.clk_path_vote.mstr_id,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = 0,
+		.ib  = 0,
+	};
+
+	paths[I2C_MSM_CLK_PATH_RESUME_VEC]  = (struct msm_bus_vectors) {
+		.src = ctrl->rsrcs.clk_path_vote.mstr_id,
+		.dst = MSM_BUS_SLAVE_EBI_CH0,
+		.ab  = I2C_MSM_CLK_PATH_AVRG_BW(ctrl),
+		.ib  = I2C_MSM_CLK_PATH_BRST_BW(ctrl),
+	};
+
+	usecases[I2C_MSM_CLK_PATH_SUSPEND_VEC] = (struct msm_bus_paths) {
+		.num_paths = 1,
+		.vectors   = &paths[I2C_MSM_CLK_PATH_SUSPEND_VEC],
+	};
+
+	usecases[I2C_MSM_CLK_PATH_RESUME_VEC] = (struct msm_bus_paths) {
+		.num_paths = 1,
+		.vectors   = &paths[I2C_MSM_CLK_PATH_RESUME_VEC],
+	};
+
+	*ctrl->rsrcs.clk_path_vote.pdata = (struct msm_bus_scale_pdata) {
+		.usecase      = usecases,
+		.num_usecases = 2,
+		.name         = dev_name(ctrl->dev),
+	};
+
+	return 0;
+
+path_init_err:
+	kfree(paths);
+	kfree(usecases);
+	kfree(ctrl->rsrcs.clk_path_vote.pdata);
+	ctrl->rsrcs.clk_path_vote.pdata = NULL;
+	return -ENOMEM;
+}
+
+/*
+ * i2c_msm_clk_path_postponed_register: reg with bus-scaling after it is probed
+ *
+ * @return zero on success
+ *
+ * Workaround: i2c driver may be probed before the bus scaling driver. Calling
+ * msm_bus_scale_register_client() will fail if the bus scaling driver is not
+ * ready yet. Thus, this function should be called not from probe but from a
+ * later context. Also, this function may be called more then once before
+ * register succeed. At this case only one error message will be logged. At boot
+ * time all clocks are on, so earlier i2c transactions should succeed.
+ */
+static int i2c_msm_clk_path_postponed_register(struct i2c_msm_ctrl *ctrl)
+{
+	ctrl->rsrcs.clk_path_vote.client_hdl =
+		msm_bus_scale_register_client(ctrl->rsrcs.clk_path_vote.pdata);
+
+	if (ctrl->rsrcs.clk_path_vote.client_hdl) {
+		if (ctrl->rsrcs.clk_path_vote.reg_err) {
+			/* log a success message if an error msg was logged */
+			ctrl->rsrcs.clk_path_vote.reg_err = false;
+			dev_err(ctrl->dev,
+				"msm_bus_scale_register_client(mstr-id:%d):0x%x (ok)\n",
+				ctrl->rsrcs.clk_path_vote.mstr_id,
+				ctrl->rsrcs.clk_path_vote.client_hdl);
+		}
+	} else {
+		/* guard to log only one error on multiple failure */
+		if (!ctrl->rsrcs.clk_path_vote.reg_err) {
+			ctrl->rsrcs.clk_path_vote.reg_err = true;
+
+			dev_info(ctrl->dev,
+				"msm_bus_scale_register_client(mstr-id:%d):0 (not a problem)\n",
+				ctrl->rsrcs.clk_path_vote.mstr_id);
+		}
+	}
+
+	return ctrl->rsrcs.clk_path_vote.client_hdl ? 0 : -EAGAIN;
+}
+
+static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl)
+{
+	/*
+	 * bail out if path voting is diabled (master_id == 0) or if it is
+	 * already registered (client_hdl != 0)
+	 */
+	if (!ctrl->rsrcs.clk_path_vote.mstr_id ||
+		ctrl->rsrcs.clk_path_vote.client_hdl)
+		return;
+
+	/* if fail once then try no more */
+	if (!ctrl->rsrcs.clk_path_vote.pdata &&
+					i2c_msm_clk_path_init_structs(ctrl)) {
+		ctrl->rsrcs.clk_path_vote.mstr_id = 0;
+		return;
+	}
+
+	/* on failure try again later */
+	if (i2c_msm_clk_path_postponed_register(ctrl))
+		return;
+}
+
+/*
+ * i2c_msm_qup_isr: QUP interrupt service routine
+ */
+static irqreturn_t i2c_msm_qup_isr(int irq, void *devid)
+{
+	struct i2c_msm_ctrl *ctrl = devid;
+	void __iomem        *base = ctrl->rsrcs.base;
+	struct i2c_msm_xfer *xfer = &ctrl->xfer;
+	struct i2c_msm_xfer_mode_blk *blk = &ctrl->xfer.blk;
+	u32  err_flags  = 0;
+	u32  clr_flds   = 0;
+	bool log_event       = false;
+	bool signal_complete = false;
+	bool need_wmb        = false;
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_IRQ_BGN, irq, 0, 0);
+
+	if (!atomic_read(&ctrl->xfer.is_active)) {
+		dev_info(ctrl->dev, "irq:%d when no active transfer\n", irq);
+		return IRQ_HANDLED;
+	}
+
+	ctrl->i2c_sts_reg  = readl_relaxed(base + QUP_I2C_STATUS);
+	err_flags	   = readl_relaxed(base + QUP_ERROR_FLAGS);
+	ctrl->qup_op_reg   = readl_relaxed(base + QUP_OPERATIONAL);
+
+	if (ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK) {
+		signal_complete = true;
+		log_event       = true;
+		/*
+		 * If there is more than 1 error here, last one sticks.
+		 * The order of the error set here matters.
+		 */
+		if (ctrl->i2c_sts_reg & QUP_ARB_LOST)
+			ctrl->xfer.err = I2C_MSM_ERR_ARB_LOST;
+
+		if (ctrl->i2c_sts_reg & QUP_BUS_ERROR)
+			ctrl->xfer.err = I2C_MSM_ERR_BUS_ERR;
+
+		if (ctrl->i2c_sts_reg & QUP_PACKET_NACKED)
+			ctrl->xfer.err = I2C_MSM_ERR_NACK;
+	}
+
+	/* check for FIFO over/under runs error */
+	if (err_flags & QUP_ERR_FLGS_MASK)
+		ctrl->xfer.err = I2C_MSM_ERR_OVR_UNDR_RUN;
+
+	/* Dump the register values before reset the core */
+	if (ctrl->xfer.err && ctrl->dbgfs.dbg_lvl >= MSM_DBG)
+		i2c_msm_dbg_qup_reg_dump(ctrl);
+
+	/* clear interrupts fields */
+	clr_flds = ctrl->i2c_sts_reg & QUP_MSTR_STTS_ERR_MASK;
+	if (clr_flds) {
+		writel_relaxed(clr_flds, base + QUP_I2C_STATUS);
+		need_wmb = true;
+	}
+
+	clr_flds = err_flags & QUP_ERR_FLGS_MASK;
+	if (clr_flds) {
+		writel_relaxed(clr_flds,  base + QUP_ERROR_FLAGS);
+		need_wmb = true;
+	}
+
+	clr_flds = ctrl->qup_op_reg &
+			(QUP_OUTPUT_SERVICE_FLAG |
+			QUP_INPUT_SERVICE_FLAG);
+	if (clr_flds) {
+		writel_relaxed(clr_flds, base + QUP_OPERATIONAL);
+		need_wmb = true;
+	}
+
+	if (need_wmb)
+		/*
+		 * flush writes that clear the interrupt flags before changing
+		 * state to reset.
+		 */
+		wmb();
+
+	/* Reset and bail out on error */
+	if (ctrl->xfer.err) {
+		/* Flush for the tags in case of an error and DMA Mode*/
+		if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA) {
+			writel_relaxed(QUP_I2C_FLUSH, ctrl->rsrcs.base
+								+ QUP_STATE);
+			/*
+			 * Ensure that QUP_I2C_FLUSH is written before
+			 * State reset
+			 */
+			wmb();
+		}
+
+		/* HW workaround: when interrupt is level triggerd, more
+		 * than one interrupt may fire in error cases. Thus we
+		 * change the QUP core state to Reset immediately in the
+		 * ISR to ward off the next interrupt.
+		 */
+		writel_relaxed(QUP_STATE_RESET, ctrl->rsrcs.base + QUP_STATE);
+
+		signal_complete = true;
+		log_event       = true;
+		goto isr_end;
+	}
+
+	/* handle data completion */
+	if (xfer->mode_id == I2C_MSM_XFER_MODE_BLOCK) {
+		/* block ready for writing */
+		if (ctrl->qup_op_reg & QUP_OUTPUT_SERVICE_FLAG) {
+			log_event = true;
+			if (ctrl->qup_op_reg & QUP_OUT_BLOCK_WRITE_REQ)
+				complete(&blk->wait_tx_blk);
+
+			if ((ctrl->qup_op_reg & blk->complete_mask)
+					== blk->complete_mask) {
+				log_event       = true;
+				signal_complete = true;
+			}
+		}
+		/* block ready for reading */
+		if (ctrl->qup_op_reg & QUP_INPUT_SERVICE_FLAG) {
+			log_event = true;
+			complete(&blk->wait_rx_blk);
+		}
+	} else {
+		/* for FIFO/DMA Mode*/
+		if (ctrl->qup_op_reg & QUP_MAX_INPUT_DONE_FLAG) {
+			log_event = true;
+			/*
+			 * If last transaction is an input then the entire
+			 * transfer is done
+			 */
+			if (ctrl->xfer.last_is_rx)
+				signal_complete = true;
+		}
+		/*
+		 * Ideally, would like to check QUP_MAX_OUTPUT_DONE_FLAG.
+		 * However, QUP_MAX_OUTPUT_DONE_FLAG is lagging behind
+		 * QUP_OUTPUT_SERVICE_FLAG. The only reason for
+		 * QUP_OUTPUT_SERVICE_FLAG to be set in FIFO mode is
+		 * QUP_MAX_OUTPUT_DONE_FLAG condition. The code checking
+		 * here QUP_OUTPUT_SERVICE_FLAG and assumes that
+		 * QUP_MAX_OUTPUT_DONE_FLAG.
+		 */
+		if (ctrl->qup_op_reg & (QUP_OUTPUT_SERVICE_FLAG |
+						QUP_MAX_OUTPUT_DONE_FLAG)) {
+			log_event = true;
+			/*
+			 * If last transaction is an output then the
+			 * entire transfer is done
+			 */
+			if (!ctrl->xfer.last_is_rx)
+				signal_complete = true;
+		}
+	}
+
+isr_end:
+	if (log_event || (ctrl->dbgfs.dbg_lvl >= MSM_DBG))
+		i2c_msm_prof_evnt_add(ctrl, MSM_PROF,
+					I2C_MSM_IRQ_END,
+					ctrl->i2c_sts_reg, ctrl->qup_op_reg,
+					err_flags);
+
+	if (signal_complete)
+		complete(&ctrl->xfer.complete);
+
+	return IRQ_HANDLED;
+}
+
+static void i2x_msm_blk_free_cache(struct i2c_msm_ctrl *ctrl)
+{
+	kfree(ctrl->xfer.blk.tx_cache);
+	kfree(ctrl->xfer.blk.rx_cache);
+}
+
/*
 * i2c_msm_qup_init: reset the QUP core and program its static configuration.
 *
 * Write order matters: SW reset and state reset first, then mini-core
 * config, error-flag enables, irq masks, and the v2-tag master config.
 */
static void i2c_msm_qup_init(struct i2c_msm_ctrl *ctrl)
{
	u32 state;
	void __iomem *base = ctrl->rsrcs.base;

	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_PROF_RESET, 0, 0, 0);

	i2c_msm_qup_sw_reset(ctrl);
	i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);

	/* select the I2C mini core */
	writel_relaxed(QUP_N_VAL | QUP_MINI_CORE_I2C_VAL, base + QUP_CONFIG);

	writel_relaxed(QUP_OUTPUT_OVER_RUN_ERR_EN | QUP_INPUT_UNDER_RUN_ERR_EN
		     | QUP_OUTPUT_UNDER_RUN_ERR_EN | QUP_INPUT_OVER_RUN_ERR_EN,
					base + QUP_ERROR_FLAGS_EN);

	writel_relaxed(QUP_INPUT_SERVICE_MASK | QUP_OUTPUT_SERVICE_MASK,
					base + QUP_OPERATIONAL_MASK);

	writel_relaxed(QUP_EN_VERSION_TWO_TAG, base + QUP_I2C_MASTER_CONFIG);

	i2c_msm_qup_fifo_calc_size(ctrl);
	/*
	 * Ensure that QUP configuration is written and that fifo size if read
	 * before leaving this function
	 */
	mb();

	state = readl_relaxed(base + QUP_STATE);

	/* sanity check: the core must report I2C master capability */
	if (!(state & QUP_I2C_MAST_GEN))
		dev_err(ctrl->dev,
			"error on verifying HW support (I2C_MAST_GEN=0)\n");
}
+
+static void qup_i2c_recover_bit_bang(struct i2c_msm_ctrl *ctrl)
+{
+	int i, ret;
+	int gpio_clk;
+	int gpio_dat;
+	bool gpio_clk_status = false;
+	u32 status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
+	struct pinctrl_state *bitbang;
+
+	dev_info(ctrl->dev, "Executing bus recovery procedure (9 clk pulse)\n");
+	disable_irq(ctrl->rsrcs.irq);
+	if (!(status & (I2C_STATUS_BUS_ACTIVE)) ||
+		(status & (I2C_STATUS_BUS_MASTER))) {
+		dev_warn(ctrl->dev, "unexpected i2c recovery call:0x%x\n",
+				    status);
+		goto recovery_exit;
+	}
+
+	gpio_clk = of_get_named_gpio(ctrl->adapter.dev.of_node, "qcom,i2c-clk",
+				     0);
+	gpio_dat = of_get_named_gpio(ctrl->adapter.dev.of_node, "qcom,i2c-dat",
+				     0);
+
+	if (gpio_clk < 0 || gpio_dat < 0) {
+		dev_warn(ctrl->dev, "SW bigbang err: i2c gpios not known\n");
+		goto recovery_exit;
+	}
+
+	bitbang = i2c_msm_rsrcs_gpio_get_state(ctrl, "i2c_bitbang");
+	if (bitbang)
+		ret = pinctrl_select_state(ctrl->rsrcs.pinctrl, bitbang);
+	if (!bitbang || ret) {
+		dev_err(ctrl->dev, "GPIO pins have no bitbang setting\n");
+		goto recovery_exit;
+	}
+	for (i = 0; i < 10; i++) {
+		if (gpio_get_value(gpio_dat) && gpio_clk_status)
+			break;
+		gpio_direction_output(gpio_clk, 0);
+		udelay(5);
+		gpio_direction_output(gpio_dat, 0);
+		udelay(5);
+		gpio_direction_input(gpio_clk);
+		udelay(5);
+		if (!gpio_get_value(gpio_clk))
+			udelay(20);
+		if (!gpio_get_value(gpio_clk))
+			usleep_range(10000, 10001);
+		gpio_clk_status = gpio_get_value(gpio_clk);
+		gpio_direction_input(gpio_dat);
+		udelay(5);
+	}
+
+	i2c_msm_pm_pinctrl_state(ctrl, true);
+	udelay(10);
+
+	status = readl_relaxed(ctrl->rsrcs.base + QUP_I2C_STATUS);
+	if (!(status & I2C_STATUS_BUS_ACTIVE)) {
+		dev_info(ctrl->dev,
+			"Bus busy cleared after %d clock cycles, status %x\n",
+			 i, status);
+		goto recovery_exit;
+	}
+
+	dev_warn(ctrl->dev, "Bus still busy, status %x\n", status);
+
+recovery_exit:
+	enable_irq(ctrl->rsrcs.irq);
+}
+
/*
 * i2c_msm_qup_post_xfer: post-transfer bus check, recovery, and translation
 * of the internal error code to a standard errno (-EIO / -ETIMEDOUT /
 * -ENOTCONN).
 */
static int i2c_msm_qup_post_xfer(struct i2c_msm_ctrl *ctrl, int err)
{
	/* poll until bus is released */
	if (i2c_msm_qup_poll_bus_active_unset(ctrl)) {
		if ((ctrl->xfer.err == I2C_MSM_ERR_ARB_LOST) ||
		    (ctrl->xfer.err == I2C_MSM_ERR_BUS_ERR)  ||
		    (ctrl->xfer.err == I2C_MSM_ERR_TIMEOUT)) {
			/* attempt bit-bang recovery if a slave holds SDA low */
			if (i2c_msm_qup_slv_holds_bus(ctrl))
				qup_i2c_recover_bit_bang(ctrl);

			/* do not generalize error to EIO if its already set */
			if (!err)
				err = -EIO;
		}
	}

	/*
	 * Disable the IRQ before change to reset state to avoid
	 * spurious interrupts.
	 *
	 */
	disable_irq(ctrl->rsrcs.irq);

	/* flush dma data and reset the qup core in timeout error.
	 * for other error case, its handled by the ISR
	 */
	/* NOTE(review): bitwise '&' here while the checks above use '==';
	 * assumes I2C_MSM_ERR_TIMEOUT is a distinct bit value - confirm
	 * against the enum definition
	 */
	if (ctrl->xfer.err & I2C_MSM_ERR_TIMEOUT) {
		/* Flush for the DMA registers */
		if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
			writel_relaxed(QUP_I2C_FLUSH, ctrl->rsrcs.base
								+ QUP_STATE);

		/* reset the qup core */
		i2c_msm_qup_state_set(ctrl, QUP_STATE_RESET);
		err = -ETIMEDOUT;
	} else if (ctrl->xfer.err == I2C_MSM_ERR_NACK) {
		err = -ENOTCONN;
	}

	return err;
}
+
+static enum i2c_msm_xfer_mode_id
+i2c_msm_qup_choose_mode(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_mode_fifo *fifo = &ctrl->xfer.fifo;
+	struct i2c_msm_xfer           *xfer = &ctrl->xfer;
+	size_t rx_cnt_sum = xfer->rx_cnt + xfer->rx_ovrhd_cnt;
+	size_t tx_cnt_sum = xfer->tx_cnt + xfer->tx_ovrhd_cnt;
+
+
+	if (ctrl->dbgfs.force_xfer_mode != I2C_MSM_XFER_MODE_NONE)
+		return ctrl->dbgfs.force_xfer_mode;
+
+	if (((rx_cnt_sum < fifo->input_fifo_sz) &&
+		(tx_cnt_sum < fifo->output_fifo_sz)))
+		return I2C_MSM_XFER_MODE_FIFO;
+
+	if (ctrl->rsrcs.disable_dma)
+		return I2C_MSM_XFER_MODE_BLOCK;
+
+	return I2C_MSM_XFER_MODE_DMA;
+}
+
+/*
+ * i2c_msm_xfer_calc_timeout: calc maximum xfer time in jiffies
+ *
+ * Basically timeout = (bit_count / frequency) * safety_coefficient.
+ * The safety-coefficient also accounts for debugging delay (mostly from
+ * printk() calls).
+ */
+static void i2c_msm_xfer_calc_timeout(struct i2c_msm_ctrl *ctrl)
+{
+	size_t byte_cnt = ctrl->xfer.rx_cnt + ctrl->xfer.tx_cnt;
+	size_t bit_cnt  = byte_cnt * 9;
+	size_t bit_usec = (bit_cnt * USEC_PER_SEC) / ctrl->rsrcs.clk_freq_out;
+	size_t loging_ovrhd_coef = ctrl->dbgfs.dbg_lvl + 1;
+	size_t safety_coef   = I2C_MSM_TIMEOUT_SAFETY_COEF * loging_ovrhd_coef;
+	size_t xfer_max_usec = (bit_usec * safety_coef) +
+						I2C_MSM_TIMEOUT_MIN_USEC;
+
+	ctrl->xfer.timeout = usecs_to_jiffies(xfer_max_usec);
+}
+
/*
 * i2c_msm_xfer_wait_for_completion: wait for transfer completion or timeout.
 *
 * On timeout: marks xfer->err = I2C_MSM_ERR_TIMEOUT and returns -EIO.
 * On completion with an ISR-detected error: dumps diagnostics and returns
 * the negated error.
 */
static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
						struct completion *complete)
{
	struct i2c_msm_xfer *xfer = &ctrl->xfer;
	long  time_left;
	int   ret = 0;

	time_left = wait_for_completion_timeout(complete,
						xfer->timeout);
	if (!time_left) {
		xfer->err = I2C_MSM_ERR_TIMEOUT;
		i2c_msm_dbg_dump_diag(ctrl, false, 0, 0);
		ret = -EIO;
		i2c_msm_prof_evnt_add(ctrl, MSM_ERR, I2C_MSM_COMPLT_FL,
					xfer->timeout, time_left, 0);
	} else {
		/* return an error if one detected by ISR */
		if (ctrl->xfer.err ||
				(ctrl->dbgfs.dbg_lvl >= MSM_DBG)) {
			i2c_msm_dbg_dump_diag(ctrl, true,
					ctrl->i2c_sts_reg, ctrl->qup_op_reg);
			/* NOTE(review): returns the negated i2c_msm enum
			 * value, not a standard errno - presumably translated
			 * by i2c_msm_qup_post_xfer(); confirm
			 */
			ret = -(xfer->err);
		}
		i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_COMPLT_OK,
					xfer->timeout, time_left, 0);
	}

	return ret;
}
+
+static u16 i2c_msm_slv_rd_wr_addr(u16 slv_addr, bool is_rx)
+{
+	return (slv_addr << 1) | (is_rx ? 0x1 : 0x0);
+}
+
+/*
+ * @return true when the current transfer's buffer points to the last message
+ *    of the user's request.
+ */
+static bool i2c_msm_xfer_msg_is_last(struct i2c_msm_ctrl *ctrl)
+{
+	return ctrl->xfer.cur_buf.msg_idx >= (ctrl->xfer.msg_cnt - 1);
+}
+
+/*
+ * @return true when the current transfer's buffer points to the last
+ *    transferable buffer (size =< QUP_MAX_BUF_SZ) of the last message of the
+ *    user's request.
+ */
+static bool i2c_msm_xfer_buf_is_last(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
+	struct i2c_msg *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;
+
+	return i2c_msm_xfer_msg_is_last(ctrl) &&
+		((cur_buf->byte_idx + QUP_MAX_BUF_SZ) >= cur_msg->len);
+}
+
/*
 * i2c_msm_xfer_create_cur_tag: build the in/out QUP tags for the current
 * buffer.
 * @start_req true when this buffer begins a new message (needs a START).
 */
static void i2c_msm_xfer_create_cur_tag(struct i2c_msm_ctrl *ctrl,
								bool start_req)
{
	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;

	cur_buf->out_tag = i2c_msm_tag_create(start_req, cur_buf->is_last,
					cur_buf->is_rx, cur_buf->len,
					cur_buf->slv_addr);

	/* rx buffers are prefixed by an input tag read back from the FIFO */
	cur_buf->in_tag.len = cur_buf->is_rx ? QUP_BUF_OVERHD_BC : 0;
}
+
+/*
+ * i2c_msm_xfer_next_buf: support cases when msg.len > 256 bytes
+ *
+ * @return true when next buffer exist, or false when no such buffer
+ */
+static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer_buf *cur_buf = &ctrl->xfer.cur_buf;
+	struct i2c_msg          *cur_msg = ctrl->xfer.msgs + cur_buf->msg_idx;
+	int bc_rem = cur_msg->len - cur_buf->end_idx;
+
+	if (cur_buf->is_init && cur_buf->end_idx && bc_rem) {
+		/* not the first buffer in a message */
+
+		cur_buf->byte_idx  = cur_buf->end_idx;
+		cur_buf->is_last   = i2c_msm_xfer_buf_is_last(ctrl);
+		cur_buf->len       = min_t(int, bc_rem, QUP_MAX_BUF_SZ);
+		cur_buf->end_idx  += cur_buf->len;
+
+		/* No Start is required if it is not a first buffer in msg */
+		i2c_msm_xfer_create_cur_tag(ctrl, false);
+	} else {
+		/* first buffer in a new message */
+		if (cur_buf->is_init) {
+			if (i2c_msm_xfer_msg_is_last(ctrl))
+				return false;
+
+			++cur_buf->msg_idx;
+			++cur_msg;
+		} else {
+			cur_buf->is_init = true;
+		}
+		cur_buf->byte_idx  = 0;
+		cur_buf->is_last   = i2c_msm_xfer_buf_is_last(ctrl);
+		cur_buf->len       = min_t(int, cur_msg->len, QUP_MAX_BUF_SZ);
+		cur_buf->is_rx     = (cur_msg->flags & I2C_M_RD);
+		cur_buf->end_idx   = cur_buf->len;
+		cur_buf->slv_addr  = i2c_msm_slv_rd_wr_addr(cur_msg->addr,
+								cur_buf->is_rx);
+		i2c_msm_xfer_create_cur_tag(ctrl, true);
+	}
+	i2c_msm_prof_evnt_add(ctrl, MSM_DBG, I2C_MSM_NEXT_BUF, cur_buf->msg_idx,
+							cur_buf->byte_idx, 0);
+	return  true;
+}
+
+/* i2c_msm_pm_clk_unprepare: unprepare both QUP clocks (core, then iface) */
+static void i2c_msm_pm_clk_unprepare(struct i2c_msm_ctrl *ctrl)
+{
+	clk_unprepare(ctrl->rsrcs.core_clk);
+	clk_unprepare(ctrl->rsrcs.iface_clk);
+}
+
+/*
+ * i2c_msm_pm_clk_prepare: prepare iface then core clock
+ *
+ * @return zero on success; on failure nothing is left prepared
+ */
+static int i2c_msm_pm_clk_prepare(struct i2c_msm_ctrl *ctrl)
+{
+	int ret = clk_prepare(ctrl->rsrcs.iface_clk);
+
+	if (ret) {
+		dev_err(ctrl->dev,
+			"error on clk_prepare(iface_clk):%d\n", ret);
+		return ret;
+	}
+
+	ret = clk_prepare(ctrl->rsrcs.core_clk);
+	if (!ret)
+		return 0;
+
+	/* roll back the iface clock when the core clock fails */
+	clk_unprepare(ctrl->rsrcs.iface_clk);
+	dev_err(ctrl->dev, "error clk_prepare(core_clk):%d\n", ret);
+	return ret;
+}
+
+/* i2c_msm_pm_clk_disable: disable both QUP clocks (core, then iface) */
+static void i2c_msm_pm_clk_disable(struct i2c_msm_ctrl *ctrl)
+{
+	clk_disable(ctrl->rsrcs.core_clk);
+	clk_disable(ctrl->rsrcs.iface_clk);
+}
+
+/*
+ * i2c_msm_pm_clk_enable: enable iface then core clock
+ *
+ * NOTE: on any failure this also UNPREPARES both clocks, i.e. it tears
+ * down the state set up by i2c_msm_pm_clk_prepare(). Callers must not
+ * unprepare again on the error path.
+ */
+static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	ret = clk_enable(ctrl->rsrcs.iface_clk);
+	if (ret) {
+		dev_err(ctrl->dev,
+			"error on clk_enable(iface_clk):%d\n", ret);
+		i2c_msm_pm_clk_unprepare(ctrl);
+		return ret;
+	}
+	ret = clk_enable(ctrl->rsrcs.core_clk);
+	if (ret) {
+		/* undo the iface enable before unpreparing both clocks */
+		clk_disable(ctrl->rsrcs.iface_clk);
+		i2c_msm_pm_clk_unprepare(ctrl);
+		dev_err(ctrl->dev,
+			"error clk_enable(core_clk):%d\n", ret);
+	}
+	return ret;
+}
+
+/*
+ * i2c_msm_pm_xfer_start: power up and arm the controller for one transfer
+ *
+ * Serializes transfers via xfer.mtx (held until i2c_msm_pm_xfer_end),
+ * selects active pinctrl state, resumes the device, enables clocks,
+ * re-inits the QUP core and unmasks the interrupt.
+ *
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	mutex_lock(&ctrl->xfer.mtx);
+
+	i2c_msm_pm_pinctrl_state(ctrl, true);
+	pm_runtime_get_sync(ctrl->dev);
+	/*
+	 * if runtime PM callback was not invoked (when both runtime-pm
+	 * and systme-pm are in transition concurrently)
+	 */
+	if (ctrl->pwr_state != I2C_MSM_PM_RT_ACTIVE) {
+		dev_info(ctrl->dev, "Runtime PM-callback was not invoked\n");
+		i2c_msm_pm_resume(ctrl->dev);
+	}
+
+	ret = i2c_msm_pm_clk_enable(ctrl);
+	if (ret) {
+		mutex_unlock(&ctrl->xfer.mtx);
+		return ret;
+	}
+	i2c_msm_qup_init(ctrl);
+
+	/* Set xfer to active state (efectively enabling our ISR)*/
+	atomic_set(&ctrl->xfer.is_active, 1);
+
+	enable_irq(ctrl->rsrcs.irq);
+	return 0;
+}
+
+/*
+ * i2c_msm_pm_xfer_end: undo i2c_msm_pm_xfer_start after a transfer
+ *
+ * Disables the ISR path, releases DMA channels if used, drops clocks,
+ * schedules runtime autosuspend and releases the transfer mutex.
+ */
+static void i2c_msm_pm_xfer_end(struct i2c_msm_ctrl *ctrl)
+{
+
+	atomic_set(&ctrl->xfer.is_active, 0);
+
+	/*
+	 * DMA resources are freed due to multi-EE use case.
+	 * Other EEs can potentially use the DMA
+	 * resources with in the same runtime PM vote.
+	 */
+	if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
+		i2c_msm_dma_free_channels(ctrl);
+
+	i2c_msm_pm_clk_disable(ctrl);
+
+	/* with runtime PM disabled, suspend synchronously here */
+	if (!pm_runtime_enabled(ctrl->dev))
+		i2c_msm_pm_suspend(ctrl->dev);
+
+	pm_runtime_mark_last_busy(ctrl->dev);
+	pm_runtime_put_autosuspend(ctrl->dev);
+	i2c_msm_pm_pinctrl_state(ctrl, false);
+	mutex_unlock(&ctrl->xfer.mtx);
+}
+
+/*
+ * i2c_msm_xfer_scan: dry-run over all buffers to accumulate byte counts
+ *
+ * Totals payload and tag-overhead bytes (used for mode selection and
+ * timeout calculation), then resets the buffer cursor for the real run.
+ */
+static void i2c_msm_xfer_scan(struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_msm_xfer     *xfer = &ctrl->xfer;
+	struct i2c_msm_xfer_buf *buf  = &xfer->cur_buf;
+
+	while (i2c_msm_xfer_next_buf(ctrl)) {
+		if (buf->is_rx)
+			xfer->rx_cnt += buf->len;
+		else
+			xfer->tx_cnt += buf->len;
+
+		xfer->rx_ovrhd_cnt += buf->in_tag.len;
+		xfer->tx_ovrhd_cnt += buf->out_tag.len;
+
+		if (i2c_msm_xfer_msg_is_last(ctrl))
+			xfer->last_is_rx = buf->is_rx;
+	}
+
+	/* rewind the cursor so the actual transfer starts from scratch */
+	xfer->cur_buf = (struct i2c_msm_xfer_buf){0};
+}
+
+/*
+ * i2c_msm_frmwrk_xfer: i2c-core master_xfer entry point
+ *
+ * Validates input, powers up the controller, scans the messages to pick a
+ * transfer mode (FIFO/BLOCK/DMA), runs the transfer and powers back down.
+ *
+ * @return number of messages processed on success, negative error otherwise
+ */
+static int
+i2c_msm_frmwrk_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
+{
+	int ret = 0;
+	struct i2c_msm_ctrl      *ctrl = i2c_get_adapdata(adap);
+	struct i2c_msm_xfer      *xfer = &ctrl->xfer;
+
+	if (IS_ERR_OR_NULL(msgs)) {
+		dev_err(ctrl->dev, " error on msgs Accessing invalid  pointer location\n");
+		return (msgs) ? PTR_ERR(msgs) : -EINVAL;
+	}
+
+	/* if system is suspended just bail out */
+	if (ctrl->pwr_state == I2C_MSM_PM_SYS_SUSPENDED) {
+		dev_err(ctrl->dev,
+				"slave:0x%x is calling xfer when system is suspended\n",
+				msgs->addr);
+		return -EIO;
+	}
+
+	ret = i2c_msm_pm_xfer_start(ctrl);
+	if (ret)
+		return ret;
+
+	/* init xfer */
+	xfer->msgs         = msgs;
+	xfer->msg_cnt      = num;
+	xfer->mode_id      = I2C_MSM_XFER_MODE_NONE;
+	xfer->err          = 0;
+	xfer->rx_cnt       = 0;
+	xfer->tx_cnt       = 0;
+	xfer->rx_ovrhd_cnt = 0;
+	xfer->tx_ovrhd_cnt = 0;
+	atomic_set(&xfer->event_cnt, 0);
+	init_completion(&xfer->complete);
+	init_completion(&xfer->rx_complete);
+
+	xfer->cur_buf.is_init = false;
+	xfer->cur_buf.msg_idx = 0;
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_XFER_BEG, num,
+								msgs->addr, 0);
+
+	/* size the transfer, derive the timeout and pick a HW mode */
+	i2c_msm_xfer_scan(ctrl);
+	i2c_msm_xfer_calc_timeout(ctrl);
+	xfer->mode_id = i2c_msm_qup_choose_mode(ctrl);
+
+	dev_dbg(ctrl->dev, "xfer() mode:%d msg_cnt:%d rx_cbt:%zu tx_cnt:%zu\n",
+		xfer->mode_id, xfer->msg_cnt, xfer->rx_cnt, xfer->tx_cnt);
+
+	/* dispatch to the chosen transfer-mode implementation */
+	switch (xfer->mode_id) {
+	case I2C_MSM_XFER_MODE_FIFO:
+		ret = i2c_msm_fifo_xfer(ctrl);
+		break;
+	case I2C_MSM_XFER_MODE_BLOCK:
+		ret = i2c_msm_blk_xfer(ctrl);
+		break;
+	case I2C_MSM_XFER_MODE_DMA:
+		ret = i2c_msm_dma_xfer(ctrl);
+		break;
+	default:
+		ret = -EINTR;
+	}
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_SCAN_SUM,
+		((xfer->rx_cnt & 0xff) | ((xfer->rx_ovrhd_cnt & 0xff) << 16)),
+		((xfer->tx_cnt & 0xff) | ((xfer->tx_ovrhd_cnt & 0xff) << 16)),
+		((ctrl->xfer.timeout & 0xfff) | ((xfer->mode_id & 0xf) << 24)));
+
+	ret = i2c_msm_qup_post_xfer(ctrl, ret);
+	/* on success, return number of messages sent (which is index + 1)*/
+	if (!ret)
+		ret = xfer->cur_buf.msg_idx + 1;
+
+	i2c_msm_prof_evnt_add(ctrl, MSM_PROF, I2C_MSM_XFER_END, ret, xfer->err,
+						xfer->cur_buf.msg_idx + 1);
+	/* process and dump profiling data */
+	if (xfer->err || (ctrl->dbgfs.dbg_lvl >= MSM_PROF))
+		i2c_msm_prof_evnt_dump(ctrl);
+
+	i2c_msm_pm_xfer_end(ctrl);
+	return ret;
+}
+
+/* how to react when a device-tree property from the map is missing */
+enum i2c_msm_dt_entry_status {
+	DT_REQ,  /* Required:  fail if missing */
+	DT_SGST, /* Suggested: warn if missing */
+	DT_OPT,  /* Optional:  don't warn if missing */
+};
+
+/* value type of a device-tree property and how it is parsed */
+enum i2c_msm_dt_entry_type {
+	DT_U32,
+	DT_BOOL,
+	DT_ID,   /* of_alias_get_id() */
+};
+
+/* one row of the DT-property to platform-data translation table */
+struct i2c_msm_dt_to_pdata_map {
+	const char                  *dt_name;     /* property name */
+	void                        *ptr_data;    /* destination field */
+	enum i2c_msm_dt_entry_status status;
+	enum i2c_msm_dt_entry_type   type;
+	int                          default_val; /* used when read fails */
+};
+
+/*
+ * i2c_msm_dt_to_pdata_populate: read each mapped DT property into place
+ *
+ * Walks a NULL-terminated i2c_msm_dt_to_pdata_map table. A failed read
+ * stores the entry's default value; only DT_REQ entries turn the failure
+ * into the function's return value (the loop still continues so that all
+ * missing required entries get logged).
+ *
+ * @return zero on success, or the first error seen on a DT_REQ entry
+ */
+static int i2c_msm_dt_to_pdata_populate(struct i2c_msm_ctrl *ctrl,
+					struct platform_device *pdev,
+					struct i2c_msm_dt_to_pdata_map *itr)
+{
+	int  ret, err = 0;
+	struct device_node *node = pdev->dev.of_node;
+
+	for (; itr->dt_name ; ++itr) {
+		switch (itr->type) {
+		case DT_U32:
+			ret = of_property_read_u32(node, itr->dt_name,
+							 (u32 *) itr->ptr_data);
+			break;
+		case DT_BOOL:
+			*((bool *) itr->ptr_data) =
+				of_property_read_bool(node, itr->dt_name);
+			ret = 0;
+			break;
+		case DT_ID:
+			ret = of_alias_get_id(node, itr->dt_name);
+			if (ret >= 0) {
+				*((int *) itr->ptr_data) = ret;
+				ret = 0;
+			}
+			break;
+		default:
+			dev_err(ctrl->dev,
+				"error %d is of unknown DT entry type\n",
+				itr->type);
+			ret = -EBADE;
+		}
+
+		i2c_msm_dbg(ctrl, MSM_PROF, "DT entry ret:%d name:%s val:%d\n",
+				ret, itr->dt_name, *((int *)itr->ptr_data));
+
+		if (ret) {
+			/* read failed: fall back to the table default */
+			*((int *)itr->ptr_data) = itr->default_val;
+
+			if (itr->status < DT_OPT) {
+				dev_err(ctrl->dev,
+					"error Missing '%s' DT entry\n",
+					itr->dt_name);
+
+				/* cont on err to dump all missing entries */
+				if (itr->status == DT_REQ && !err)
+					err = ret;
+			}
+		}
+	}
+
+	return err;
+}
+
+
+/*
+ * i2c_msm_rsrcs_process_dt: copy data from DT to platform data
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_rsrcs_process_dt(struct i2c_msm_ctrl *ctrl,
+					struct platform_device *pdev)
+{
+	/* optional entries land here; defaults (0) apply when absent */
+	u32 fs_clk_div, ht_clk_div, noise_rjct_scl, noise_rjct_sda;
+	int ret;
+
+	struct i2c_msm_dt_to_pdata_map map[] = {
+	{"i2c",				&pdev->id,	DT_REQ,  DT_ID,  -1},
+	{"qcom,clk-freq-out",		&ctrl->rsrcs.clk_freq_out,
+							DT_REQ,  DT_U32,  0},
+	{"qcom,clk-freq-in",		&ctrl->rsrcs.clk_freq_in,
+							DT_REQ,  DT_U32,  0},
+	{"qcom,disable-dma",		&(ctrl->rsrcs.disable_dma),
+							DT_OPT,  DT_BOOL, 0},
+	{"qcom,master-id",		&(ctrl->rsrcs.clk_path_vote.mstr_id),
+							DT_SGST, DT_U32,  0},
+	{"qcom,noise-rjct-scl",		&noise_rjct_scl,
+							DT_OPT,  DT_U32,  0},
+	{"qcom,noise-rjct-sda",		&noise_rjct_sda,
+							DT_OPT,  DT_U32,  0},
+	{"qcom,high-time-clk-div",	&ht_clk_div,
+							DT_OPT,  DT_U32,  0},
+	{"qcom,fs-clk-div",		&fs_clk_div,
+							DT_OPT,  DT_U32,  0},
+	{NULL,  NULL,					0,       0,       0},
+	};
+
+	ret = i2c_msm_dt_to_pdata_populate(ctrl, pdev, map);
+	if (ret)
+		return ret;
+
+	/* set divider and noise reject values */
+	return i2c_msm_set_mstr_clk_ctl(ctrl, fs_clk_div, ht_clk_div,
+						noise_rjct_scl, noise_rjct_sda);
+}
+
+/*
+ * i2c_msm_rsrcs_mem_init: reads pdata request region and ioremap it
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_rsrcs_mem_init(struct platform_device *pdev,
+						struct i2c_msm_ctrl *ctrl)
+{
+	struct resource *qup_mem;
+	struct resource *region;
+
+	qup_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"qup_phys_addr");
+	ctrl->rsrcs.mem = qup_mem;
+	if (!qup_mem) {
+		dev_err(ctrl->dev, "error Missing 'qup_phys_addr' resource\n");
+		return -ENODEV;
+	}
+
+	/* claim the register window for exclusive use */
+	region = request_mem_region(qup_mem->start, resource_size(qup_mem),
+					pdev->name);
+	if (!region) {
+		dev_err(ctrl->dev,
+			"QUP physical memory region already claimed\n");
+		return -EBUSY;
+	}
+
+	ctrl->rsrcs.base = devm_ioremap(ctrl->dev, qup_mem->start,
+				   resource_size(qup_mem));
+	if (ctrl->rsrcs.base)
+		return 0;
+
+	/* ioremap failed: release the region claimed above */
+	dev_err(ctrl->dev,
+		"error failed ioremap(base:0x%llx size:0x%llx\n)\n",
+		(u64) qup_mem->start,
+		(u64) resource_size(qup_mem));
+	release_mem_region(qup_mem->start, resource_size(qup_mem));
+	return -ENOMEM;
+}
+
+/* i2c_msm_rsrcs_mem_teardown: release the region claimed in mem_init */
+static void i2c_msm_rsrcs_mem_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	release_mem_region(ctrl->rsrcs.mem->start,
+						resource_size(ctrl->rsrcs.mem));
+}
+
+/*
+ * i2c_msm_rsrcs_irq_init: finds irq num in pdata and requests it
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_rsrcs_irq_init(struct platform_device *pdev,
+						struct i2c_msm_ctrl *ctrl)
+{
+	int irq = platform_get_irq_byname(pdev, "qup_irq");
+	int ret;
+
+	if (irq < 0) {
+		dev_err(ctrl->dev, "error reading irq resource\n");
+		return irq;
+	}
+
+	ret = request_irq(irq, i2c_msm_qup_isr, IRQF_TRIGGER_HIGH,
+						"i2c-msm-v2-irq", ctrl);
+	if (ret) {
+		dev_err(ctrl->dev, "error request_irq(irq_num:%d ) ret:%d\n",
+								irq, ret);
+		return ret;
+	}
+
+	/* keep the line masked until a transfer is actually started */
+	disable_irq(irq);
+	ctrl->rsrcs.irq = irq;
+	return 0;
+}
+
+/* i2c_msm_rsrcs_irq_teardown: free the irq requested in irq_init */
+static void i2c_msm_rsrcs_irq_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	free_irq(ctrl->rsrcs.irq, ctrl);
+}
+
+
+/*
+ * i2c_msm_rsrcs_gpio_get_state: look up a named pinctrl state
+ *
+ * Missing states are only logged (informational) — callers tolerate them.
+ */
+static struct pinctrl_state *
+i2c_msm_rsrcs_gpio_get_state(struct i2c_msm_ctrl *ctrl, const char *name)
+{
+	struct pinctrl_state *state;
+
+	state = pinctrl_lookup_state(ctrl->rsrcs.pinctrl, name);
+	if (IS_ERR_OR_NULL(state))
+		dev_info(ctrl->dev, "note pinctrl_lookup_state(%s) err:%ld\n",
+						name, PTR_ERR(state));
+
+	return state;
+}
+
+/*
+ * i2c_msm_rsrcs_gpio_pinctrl_init: initializes the pinctrl for i2c gpios
+ *
+ * @pre platform data must be initialized
+ */
+static int i2c_msm_rsrcs_gpio_pinctrl_init(struct i2c_msm_ctrl *ctrl)
+{
+	ctrl->rsrcs.pinctrl = devm_pinctrl_get(ctrl->dev);
+	if (IS_ERR_OR_NULL(ctrl->rsrcs.pinctrl)) {
+		/*
+		 * NOTE(review): PTR_ERR(NULL) is 0, so a NULL pinctrl would
+		 * return "success" here — devm_pinctrl_get() normally returns
+		 * an ERR_PTR, never NULL; confirm before relying on this.
+		 */
+		dev_err(ctrl->dev, "error devm_pinctrl_get() failed err:%ld\n",
+				PTR_ERR(ctrl->rsrcs.pinctrl));
+		return PTR_ERR(ctrl->rsrcs.pinctrl);
+	}
+
+	/* missing states are tolerated; lookups log on their own */
+	ctrl->rsrcs.gpio_state_active =
+		i2c_msm_rsrcs_gpio_get_state(ctrl, I2C_MSM_PINCTRL_ACTIVE);
+
+	ctrl->rsrcs.gpio_state_suspend =
+		i2c_msm_rsrcs_gpio_get_state(ctrl, I2C_MSM_PINCTRL_SUSPEND);
+
+	return 0;
+}
+
+/*
+ * i2c_msm_pm_pinctrl_state: select active/suspend pin state
+ *
+ * @runtime_active true selects the active state, false the suspend state
+ */
+static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
+				bool runtime_active)
+{
+	struct pinctrl_state *state;
+	const char           *state_name;
+	int ret;
+
+	if (runtime_active) {
+		state      = ctrl->rsrcs.gpio_state_active;
+		state_name = I2C_MSM_PINCTRL_ACTIVE;
+	} else {
+		state      = ctrl->rsrcs.gpio_state_suspend;
+		state_name = I2C_MSM_PINCTRL_SUSPEND;
+	}
+
+	/* bail with a log when the state was never resolved at probe */
+	if (IS_ERR_OR_NULL(state)) {
+		dev_err(ctrl->dev,
+			"error pinctrl state-name:'%s' is not configured\n",
+			state_name);
+		return;
+	}
+
+	ret = pinctrl_select_state(ctrl->rsrcs.pinctrl, state);
+	if (ret)
+		dev_err(ctrl->dev,
+		"error pinctrl_select_state(%s) err:%d\n",
+		state_name, ret);
+}
+
+/*
+ * i2c_msm_rsrcs_clk_init: get clocks and set rate
+ *
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_rsrcs_clk_init(struct i2c_msm_ctrl *ctrl)
+{
+	int ret;
+
+	/* reject unsupported output frequencies up front */
+	if ((ctrl->rsrcs.clk_freq_out <= 0) ||
+	    (ctrl->rsrcs.clk_freq_out > I2C_MSM_CLK_FAST_PLUS_FREQ)) {
+		dev_err(ctrl->dev,
+			"error clock frequency %dKHZ is not supported\n",
+			(ctrl->rsrcs.clk_freq_out / 1000));
+		return -EIO;
+	}
+
+	ctrl->rsrcs.core_clk = clk_get(ctrl->dev, "core_clk");
+	if (IS_ERR(ctrl->rsrcs.core_clk)) {
+		ret = PTR_ERR(ctrl->rsrcs.core_clk);
+		dev_err(ctrl->dev, "error on clk_get(core_clk):%d\n", ret);
+		return ret;
+	}
+
+	ret = clk_set_rate(ctrl->rsrcs.core_clk, ctrl->rsrcs.clk_freq_in);
+	if (ret) {
+		dev_err(ctrl->dev, "error on clk_set_rate(core_clk, %dKHz):%d\n",
+					(ctrl->rsrcs.clk_freq_in / 1000), ret);
+		goto err_put_core;
+	}
+
+	ctrl->rsrcs.iface_clk = clk_get(ctrl->dev, "iface_clk");
+	if (IS_ERR(ctrl->rsrcs.iface_clk)) {
+		ret = PTR_ERR(ctrl->rsrcs.iface_clk);
+		dev_err(ctrl->dev, "error on clk_get(iface_clk):%d\n", ret);
+		goto err_put_core;
+	}
+
+	return 0;
+
+err_put_core:
+	/* release the core clock reference taken above */
+	clk_put(ctrl->rsrcs.core_clk);
+	ctrl->rsrcs.core_clk = NULL;
+	return ret;
+}
+
+/* i2c_msm_rsrcs_clk_teardown: drop clock references and the bus-path vote */
+static void i2c_msm_rsrcs_clk_teardown(struct i2c_msm_ctrl *ctrl)
+{
+	clk_put(ctrl->rsrcs.core_clk);
+	clk_put(ctrl->rsrcs.iface_clk);
+	i2c_msm_clk_path_teardown(ctrl);
+}
+
+
+
+/*
+ * i2c_msm_pm_suspend: common suspend path for runtime and system PM
+ *
+ * Unprepares clocks and drops the bus vote, then marks the controller
+ * runtime-suspended (unless a system suspend is already recorded).
+ */
+static void i2c_msm_pm_suspend(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+
+	if (ctrl->pwr_state == I2C_MSM_PM_RT_SUSPENDED) {
+		dev_err(ctrl->dev, "attempt to suspend when suspended\n");
+		return;
+	}
+	i2c_msm_dbg(ctrl, MSM_DBG, "suspending...\n");
+	i2c_msm_pm_clk_unprepare(ctrl);
+	i2c_msm_clk_path_unvote(ctrl);
+
+	/*
+	 * We implement system and runtime suspend in the same way. However
+	 * it is important for us to distinguish between them in when servicing
+	 * a transfer requests. If we get transfer request while in runtime
+	 * suspend we want to simply wake up and service that request. But if we
+	 * get a transfer request while system is suspending we want to bail
+	 * out on that request. This is why if we marked that we are in system
+	 * suspend, we do not want to override that state with runtime suspend.
+	 */
+	if (ctrl->pwr_state != I2C_MSM_PM_SYS_SUSPENDED)
+		ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
+}
+
+/*
+ * i2c_msm_pm_resume: common resume path for runtime and system PM
+ *
+ * Votes for the bus path and prepares the clocks. The controller is only
+ * marked RT_ACTIVE when clock preparation succeeds; previously the
+ * clk_prepare return value was ignored, so a failure left the device
+ * "active" with unprepared clocks and a leaked bus vote.
+ *
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_pm_resume(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+	int ret;
+
+	if (ctrl->pwr_state == I2C_MSM_PM_RT_ACTIVE)
+		return 0;
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "resuming...\n");
+
+	i2c_msm_clk_path_vote(ctrl);
+	ret = i2c_msm_pm_clk_prepare(ctrl);
+	if (ret) {
+		/* undo the bus vote taken above before reporting failure */
+		i2c_msm_clk_path_unvote(ctrl);
+		return ret;
+	}
+	ctrl->pwr_state = I2C_MSM_PM_RT_ACTIVE;
+	return 0;
+}
+
+#ifdef CONFIG_PM
+/*
+ * i2c_msm_pm_sys_suspend_noirq: system power management callback
+ *
+ * Marks the controller system-suspended under the transfer mutex, then,
+ * if it was runtime-active, suspends it and re-syncs the runtime-PM core.
+ */
+static int i2c_msm_pm_sys_suspend_noirq(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+	enum i2c_msm_power_state prev_state = ctrl->pwr_state;
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: suspending...\n");
+
+	/* Acquire mutex to ensure current transaction is over */
+	mutex_lock(&ctrl->xfer.mtx);
+	ctrl->pwr_state = I2C_MSM_PM_SYS_SUSPENDED;
+	mutex_unlock(&ctrl->xfer.mtx);
+
+	if (prev_state == I2C_MSM_PM_RT_ACTIVE) {
+		i2c_msm_pm_suspend(dev);
+		/*
+		 * Synchronize runtime-pm and system-pm states:
+		 * at this point we are already suspended. However, the
+		 * runtime-PM framework still thinks that we are active.
+		 * The three calls below let the runtime-PM know that we are
+		 * suspended already without re-invoking the suspend callback
+		 */
+		pm_runtime_disable(dev);
+		pm_runtime_set_suspended(dev);
+		pm_runtime_enable(dev);
+	}
+
+	return 0;
+}
+
+/*
+ * i2c_msm_pm_sys_resume_noirq: system power management callback
+ * shifts the controller's power state from system suspend to runtime suspend
+ */
+static int i2c_msm_pm_sys_resume_noirq(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_sys_noirq: resuming...\n");
+
+	/* serialize the state change against any in-flight transfer */
+	mutex_lock(&ctrl->xfer.mtx);
+	ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
+	mutex_unlock(&ctrl->xfer.mtx);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+/* i2c_msm_pm_rt_init: enable runtime PM with a 250 ms autosuspend delay */
+static void i2c_msm_pm_rt_init(struct device *dev)
+{
+	pm_runtime_set_suspended(dev);
+	pm_runtime_set_autosuspend_delay(dev, (MSEC_PER_SEC >> 2));
+	pm_runtime_use_autosuspend(dev);
+	pm_runtime_enable(dev);
+}
+
+/*
+ * i2c_msm_pm_rt_suspend: runtime power management callback
+ * delegates to the common suspend path; never fails
+ */
+static int i2c_msm_pm_rt_suspend(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_runtime: suspending...\n");
+	i2c_msm_pm_suspend(dev);
+	return 0;
+}
+
+/*
+ * i2c_msm_pm_rt_resume: runtime power management callback
+ * delegates to the common resume path
+ */
+static int i2c_msm_pm_rt_resume(struct device *dev)
+{
+	struct i2c_msm_ctrl *ctrl = dev_get_drvdata(dev);
+
+	i2c_msm_dbg(ctrl, MSM_DBG, "pm_runtime: resuming...\n");
+	return  i2c_msm_pm_resume(dev);
+}
+
+#else
+static void i2c_msm_pm_rt_init(struct device *dev) {}
+#define i2c_msm_pm_rt_suspend NULL
+#define i2c_msm_pm_rt_resume NULL
+#endif
+
+/* device PM callbacks: noirq system-sleep hooks plus runtime-PM hooks */
+static const struct dev_pm_ops i2c_msm_pm_ops = {
+#ifdef CONFIG_PM_SLEEP
+	.suspend_noirq		= i2c_msm_pm_sys_suspend_noirq,
+	.resume_noirq		= i2c_msm_pm_sys_resume_noirq,
+#endif
+	SET_RUNTIME_PM_OPS(i2c_msm_pm_rt_suspend,
+			   i2c_msm_pm_rt_resume,
+			   NULL)
+};
+
+/* report adapter capabilities: plain I2C + SMBus emulation minus QUICK */
+static u32 i2c_msm_frmwrk_func(struct i2c_adapter *adap)
+{
+	return I2C_FUNC_I2C | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK);
+}
+
+/* i2c-core algorithm hooks for this adapter */
+static const struct i2c_algorithm i2c_msm_frmwrk_algrtm = {
+	.master_xfer	= i2c_msm_frmwrk_xfer,
+	.functionality	= i2c_msm_frmwrk_func,
+};
+
+static const char * const i2c_msm_adapter_name = "MSM-I2C-v2-adapter";
+
+/*
+ * i2c_msm_frmwrk_reg: register this controller with the i2c core
+ *
+ * @return zero on success or negative error code
+ */
+static int i2c_msm_frmwrk_reg(struct platform_device *pdev,
+						struct i2c_msm_ctrl *ctrl)
+{
+	struct i2c_adapter *adap = &ctrl->adapter;
+	int ret;
+
+	/* wire up controller state and the algorithm hooks */
+	i2c_set_adapdata(adap, ctrl);
+	adap->algo = &i2c_msm_frmwrk_algrtm;
+	strlcpy(adap->name, i2c_msm_adapter_name, sizeof(adap->name));
+
+	adap->nr          = pdev->id;
+	adap->dev.parent  = &pdev->dev;
+	adap->dev.of_node = pdev->dev.of_node;
+
+	ret = i2c_add_numbered_adapter(adap);
+	if (ret)
+		dev_err(ctrl->dev, "error i2c_add_adapter failed\n");
+
+	return ret;
+}
+
+/* i2c_msm_frmwrk_unreg: remove the adapter from the i2c core */
+static void i2c_msm_frmwrk_unreg(struct i2c_msm_ctrl *ctrl)
+{
+	i2c_del_adapter(&ctrl->adapter);
+}
+
+/*
+ * i2c_msm_probe: acquire resources, sanity-reset the QUP core and register
+ * the adapter.
+ *
+ * Error-path fixes versus the original: a clk_prepare/clk_enable failure
+ * now tears the clocks down (previously the clk_get() references leaked),
+ * and the redundant extra clk_unprepare after a failed clk_enable is gone
+ * (i2c_msm_pm_clk_enable() already unprepares on failure, so the old code
+ * unprepared twice).
+ */
+static int i2c_msm_probe(struct platform_device *pdev)
+{
+	struct i2c_msm_ctrl *ctrl;
+	int ret = 0;
+
+	ctrl = devm_kzalloc(&pdev->dev, sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return -ENOMEM;
+	ctrl->dev = &pdev->dev;
+	platform_set_drvdata(pdev, ctrl);
+	ctrl->dbgfs.dbg_lvl         = DEFAULT_DBG_LVL;
+	ctrl->dbgfs.force_xfer_mode = I2C_MSM_XFER_MODE_NONE;
+	mutex_init(&ctrl->xfer.mtx);
+	ctrl->pwr_state = I2C_MSM_PM_RT_SUSPENDED;
+
+	if (!pdev->dev.of_node) {
+		dev_err(&pdev->dev, "error: null device-tree node\n");
+		return -EBADE;
+	}
+
+	ret = i2c_msm_rsrcs_process_dt(ctrl, pdev);
+	if (ret) {
+		dev_err(ctrl->dev, "error in process device tree node\n");
+		return ret;
+	}
+
+	ret = i2c_msm_rsrcs_mem_init(pdev, ctrl);
+	if (ret)
+		goto mem_err;
+
+	ret = i2c_msm_rsrcs_clk_init(ctrl);
+	if (ret)
+		goto clk_err;
+
+	/* vote for clock to enable reading the version number off the HW */
+	i2c_msm_clk_path_vote(ctrl);
+
+	ret = i2c_msm_pm_clk_prepare(ctrl);
+	if (ret)
+		/* clocks were acquired by clk_init and must be put back */
+		goto err_no_pinctrl;
+
+	ret = i2c_msm_pm_clk_enable(ctrl);
+	if (ret)
+		/*
+		 * i2c_msm_pm_clk_enable() already unprepared the clocks on
+		 * failure; only the clk_get() references remain to undo.
+		 */
+		goto err_no_pinctrl;
+
+	/*
+	 * reset the core before registering for interrupts. This solves an
+	 * interrupt storm issue when the bootloader leaves a pending interrupt.
+	 */
+	ret = i2c_msm_qup_sw_reset(ctrl);
+	if (ret)
+		dev_err(ctrl->dev, "error on qup software reset\n");
+
+	i2c_msm_pm_clk_disable(ctrl);
+	i2c_msm_pm_clk_unprepare(ctrl);
+	i2c_msm_clk_path_unvote(ctrl);
+
+	ret = i2c_msm_rsrcs_gpio_pinctrl_init(ctrl);
+	if (ret)
+		goto err_no_pinctrl;
+
+	i2c_msm_pm_rt_init(ctrl->dev);
+
+	ret = i2c_msm_rsrcs_irq_init(pdev, ctrl);
+	if (ret)
+		goto irq_err;
+
+	i2c_msm_dbgfs_init(ctrl);
+
+	ret = i2c_msm_frmwrk_reg(pdev, ctrl);
+	if (ret)
+		goto reg_err;
+
+	i2c_msm_dbg(ctrl, MSM_PROF, "probe() completed with success\n");
+	return 0;
+
+reg_err:
+	i2c_msm_dbgfs_teardown(ctrl);
+	i2c_msm_rsrcs_irq_teardown(ctrl);
+irq_err:
+	i2x_msm_blk_free_cache(ctrl);
+err_no_pinctrl:
+	i2c_msm_rsrcs_clk_teardown(ctrl);
+clk_err:
+	i2c_msm_rsrcs_mem_teardown(ctrl);
+mem_err:
+	dev_err(ctrl->dev, "error probe() failed with err:%d\n", ret);
+	return ret;
+}
+
+/*
+ * i2c_msm_remove: unregister the adapter and release all resources
+ *
+ * Takes the transfer mutex first so removal cannot race an active xfer.
+ */
+static int i2c_msm_remove(struct platform_device *pdev)
+{
+	struct i2c_msm_ctrl *ctrl = platform_get_drvdata(pdev);
+
+	/* Grab mutex to ensure ongoing transaction is over */
+	mutex_lock(&ctrl->xfer.mtx);
+	ctrl->pwr_state = I2C_MSM_PM_SYS_SUSPENDED;
+	pm_runtime_disable(ctrl->dev);
+	/* no one can call a xfer after the next line */
+	i2c_msm_frmwrk_unreg(ctrl);
+	mutex_unlock(&ctrl->xfer.mtx);
+	mutex_destroy(&ctrl->xfer.mtx);
+
+	/* tear down in reverse order of probe */
+	i2c_msm_dma_teardown(ctrl);
+	i2c_msm_dbgfs_teardown(ctrl);
+	i2c_msm_rsrcs_irq_teardown(ctrl);
+	i2c_msm_rsrcs_clk_teardown(ctrl);
+	i2c_msm_rsrcs_mem_teardown(ctrl);
+	i2x_msm_blk_free_cache(ctrl);
+	return 0;
+}
+
+/* device-tree match table for this driver */
+static const struct of_device_id i2c_msm_dt_match[] = {
+	{
+		.compatible = "qcom,i2c-msm-v2",
+	},
+	{}
+};
+
+static struct platform_driver i2c_msm_driver = {
+	.probe  = i2c_msm_probe,
+	.remove = i2c_msm_remove,
+	.driver = {
+		.name           = "i2c-msm-v2",
+		.pm             = &i2c_msm_pm_ops,
+		.of_match_table = i2c_msm_dt_match,
+	},
+};
+
+/* registered at subsys_initcall so the bus is up before client drivers */
+static int i2c_msm_init(void)
+{
+	return platform_driver_register(&i2c_msm_driver);
+}
+subsys_initcall(i2c_msm_init);
+
+/* module unload hook: unregister the platform driver */
+static void i2c_msm_exit(void)
+{
+	platform_driver_unregister(&i2c_msm_driver);
+}
+module_exit(i2c_msm_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:i2c-msm-v2");
diff --git a/drivers/leds/leds-qti-flash.c b/drivers/leds/leds-qti-flash.c
index b0ca27b..4457cf6 100644
--- a/drivers/leds/leds-qti-flash.c
+++ b/drivers/leds/leds-qti-flash.c
@@ -859,6 +859,51 @@
 	return scnprintf(buf, PAGE_SIZE, "%d\n", snode->led->max_current);
 }
 
+/*
+ * qti_flash_led_regulator_control: toggle the charger FLASH_ACTIVE vote
+ *
+ * The original ENABLE/DISABLE branches were copy-pasted duplicates that
+ * differed only in the property value; this folds them into one path.
+ * ENABLE_REGULATOR takes precedence when both flags are set (matching the
+ * original if/else-if ordering). Only applies on PM2250.
+ *
+ * @return zero on success or negative error code
+ */
+static int qti_flash_led_regulator_control(struct led_classdev *led_cdev,
+					int options)
+{
+	struct flash_switch_data *snode;
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	snode = container_of(led_cdev, struct flash_switch_data, cdev);
+
+	/* the FLASH_ACTIVE handshake is only required on PM2250 */
+	if (snode->led->data->pmic_type != PM2250)
+		return 0;
+
+	rc = is_main_psy_available(snode->led);
+	if (rc < 0)
+		return rc;
+
+	/* nothing to do when neither enable nor disable was requested */
+	if (!(options & (ENABLE_REGULATOR | DISABLE_REGULATOR)))
+		return 0;
+
+	pval.intval = (options & ENABLE_REGULATOR) ? 1 : 0;
+	rc = power_supply_set_property(snode->led->main_psy,
+			POWER_SUPPLY_PROP_FLASH_ACTIVE,
+			&pval);
+	if (rc < 0) {
+		pr_err("Failed to set FLASH_ACTIVE on charger rc=%d\n",
+						rc);
+		return rc;
+	}
+
+	pr_debug("FLASH_ACTIVE = %d\n", pval.intval);
+
+	return 0;
+}
+
 int qti_flash_led_prepare(struct led_trigger *trig, int options,
 				int *max_current)
 {
@@ -901,18 +946,42 @@
 		return 0;
 	}
 
-	if (options & ENABLE_REGULATOR)
-		return 0;
+	rc = qti_flash_led_regulator_control(led_cdev, options);
+	if (rc < 0)
+		pr_err("Failed to set flash control options\n");
 
-	if (options & DISABLE_REGULATOR)
-		return 0;
-
-	return -EINVAL;
+	return rc;
 }
 EXPORT_SYMBOL(qti_flash_led_prepare);
 
+/*
+ * qti_flash_led_prepare_store: sysfs 'enable' writer
+ *
+ * "1" votes FLASH_ACTIVE on the charger, "0" removes the vote.
+ */
+static ssize_t qti_flash_led_prepare_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	int rc, options;
+	u32 val;
+	struct led_classdev *led_cdev = dev_get_drvdata(dev);
+
+	rc = kstrtouint(buf, 0, &val);
+	if (rc < 0)
+		return rc;
+
+	/* NOTE(review): values other than 0/1 are silently accepted and
+	 * ignored; -EINVAL may be more appropriate — confirm intent.
+	 */
+	if (val != 0 && val != 1)
+		return count;
+
+	options = val ? ENABLE_REGULATOR : DISABLE_REGULATOR;
+
+	rc = qti_flash_led_regulator_control(led_cdev, options);
+	if (rc < 0) {
+		pr_err("failed to query led regulator\n");
+		return rc;
+	}
+
+	return count;
+}
+
 static struct device_attribute qti_flash_led_attrs[] = {
 	__ATTR(max_current, 0664, qti_flash_led_max_current_show, NULL),
+	__ATTR(enable, 0664, NULL, qti_flash_led_prepare_store),
 };
 
 static int qti_flash_brightness_set_blocking(
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
index f483926..45c8bd94 100644
--- a/drivers/mmc/core/queue.c
+++ b/drivers/mmc/core/queue.c
@@ -402,7 +402,7 @@
 
 	init_waitqueue_head(&mq->wait);
 
-	if (host->cqe_ops->cqe_crypto_update_queue)
+	if (host->cqe_ops && host->cqe_ops->cqe_crypto_update_queue)
 		host->cqe_ops->cqe_crypto_update_queue(host, mq->queue);
 }
 
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 9d097b8..18000da 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -1283,6 +1283,20 @@
 	readl_relaxed(host->ioaddr + SDCC_DEBUG_FEATURE_CFG_REG);
 }
 
+/*
+ * Dummy func for Mask and Match show: logs the access and returns 0,
+ * i.e. userspace reads an empty file. Exists so the attribute can be
+ * registered with read permission alongside its store counterpart.
+ */
+static ssize_t show_mask_and_match(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+
+	if (!host)
+		return -EINVAL;
+
+	pr_info("%s: M&M show func\n", mmc_hostname(host->mmc));
+
+	return 0;
+}
+
 static ssize_t store_mask_and_match(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t count)
 {
@@ -5760,6 +5774,7 @@
 
 	if (IPCAT_MINOR_MASK(readl_relaxed(host->ioaddr +
 				SDCC_IP_CATALOG)) >= 2) {
+		msm_host->mask_and_match.show = show_mask_and_match;
 		msm_host->mask_and_match.store = store_mask_and_match;
 		sysfs_attr_init(&msm_host->mask_and_match.attr);
 		msm_host->mask_and_match.attr.name = "mask_and_match";
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 6a91e49..3857dc6 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -1064,16 +1064,20 @@
 		ret = 0;
 		break;
 	case CNSS_MHI_SUSPEND:
+		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
 		if (pci_priv->drv_connected_last)
 			ret = mhi_pm_fast_suspend(pci_priv->mhi_ctrl, true);
 		else
 			ret = mhi_pm_suspend(pci_priv->mhi_ctrl);
+		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
 		break;
 	case CNSS_MHI_RESUME:
+		mutex_lock(&pci_priv->mhi_ctrl->pm_mutex);
 		if (pci_priv->drv_connected_last)
 			ret = mhi_pm_fast_resume(pci_priv->mhi_ctrl, true);
 		else
 			ret = mhi_pm_resume(pci_priv->mhi_ctrl);
+		mutex_unlock(&pci_priv->mhi_ctrl->pm_mutex);
 		break;
 	case CNSS_MHI_TRIGGER_RDDM:
 		ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
@@ -4261,6 +4265,29 @@
 	return 0;
 }
 
+/*
+ * cnss_mhi_bw_scale: MHI bandwidth-scale callback
+ *
+ * Requests the PCIe link retrain to the target speed/width, then caches
+ * the new settings in pci_priv on success.
+ *
+ * @return zero on success or the msm_pcie_set_link_bandwidth() error
+ */
+static int cnss_mhi_bw_scale(struct mhi_controller *mhi_ctrl,
+			     struct mhi_link_info *link_info)
+{
+	struct cnss_pci_data *pci_priv = mhi_ctrl->priv_data;
+	int ret;
+
+	ret = msm_pcie_set_link_bandwidth(pci_priv->pci_dev,
+					  link_info->target_link_speed,
+					  link_info->target_link_width);
+	if (ret)
+		return ret;
+
+	/* remember the negotiated parameters as the new defaults */
+	pci_priv->def_link_speed = link_info->target_link_speed;
+	pci_priv->def_link_width = link_info->target_link_width;
+
+	cnss_pr_dbg("Setting link speed:0x%x, width:0x%x\n",
+		    link_info->target_link_speed,
+		    link_info->target_link_width);
+
+	return 0;
+}
+
 static int cnss_pci_register_mhi(struct cnss_pci_data *pci_priv)
 {
 	int ret = 0;
@@ -4312,6 +4339,7 @@
 	mhi_ctrl->status_cb = cnss_mhi_notify_status;
 	mhi_ctrl->runtime_get = cnss_mhi_pm_runtime_get;
 	mhi_ctrl->runtime_put = cnss_mhi_pm_runtime_put_noidle;
+	mhi_ctrl->bw_scale = cnss_mhi_bw_scale;
 
 	mhi_ctrl->rddm_size = pci_priv->plat_priv->ramdump_info_v2.ramdump_size;
 	mhi_ctrl->sbl_size = SZ_512K;
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index 4519473..ad15e0d 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -483,6 +483,28 @@
 }
 EXPORT_SYMBOL(ipa_uc_offload_conn_pipes);
 
+/*
+ * ipa_set_perf_profile: apply a client bandwidth profile to the IPA PM
+ *
+ * Only Ethernet producer/consumer clients are supported.
+ *
+ * @return zero on success or negative error code
+ */
+int ipa_set_perf_profile(struct ipa_perf_profile *profile)
+{
+	if (!profile) {
+		IPA_UC_OFFLOAD_ERR("Invalid input\n");
+		return -EINVAL;
+	}
+
+	switch (profile->client) {
+	case IPA_CLIENT_ETHERNET_PROD:
+	case IPA_CLIENT_ETHERNET_CONS:
+		break;
+	default:
+		IPA_UC_OFFLOAD_ERR("not supported\n");
+		return -EINVAL;
+	}
+
+	IPA_UC_OFFLOAD_DBG("setting throughput to %d\n",
+		profile->max_supported_bw_mbps);
+
+	return ipa_pm_set_throughput(
+		ipa_uc_offload_ctx[IPA_UC_NTN]->pm_hdl,
+		profile->max_supported_bw_mbps);
+}
+EXPORT_SYMBOL(ipa_set_perf_profile);
+
 static int ipa_uc_ntn_disconn_pipes(struct ipa_uc_offload_ctx *ntn_ctx)
 {
 	int ipa_ep_idx_ul, ipa_ep_idx_dl;
diff --git a/drivers/power/supply/qcom/qg-core.h b/drivers/power/supply/qcom/qg-core.h
index 8704699..b5716c3 100644
--- a/drivers/power/supply/qcom/qg-core.h
+++ b/drivers/power/supply/qcom/qg-core.h
@@ -66,6 +66,7 @@
 	bool			esr_disable;
 	bool			esr_discharge_enable;
 	bool			qg_ext_sense;
+	bool			use_cp_iin_sns;
 	bool			use_s7_ocv;
 	bool			qg_sleep_config;
 	bool			qg_fast_chg_cfg;
@@ -122,6 +123,7 @@
 	struct power_supply	*usb_psy;
 	struct power_supply	*dc_psy;
 	struct power_supply	*parallel_psy;
+	struct power_supply	*cp_psy;
 	struct qg_esr_data	esr_data[QG_MAX_ESR_COUNT];
 
 	/* status variable */
diff --git a/drivers/power/supply/qcom/qg-util.c b/drivers/power/supply/qcom/qg-util.c
index 170ca87..36b5fef 100644
--- a/drivers/power/supply/qcom/qg-util.c
+++ b/drivers/power/supply/qcom/qg-util.c
@@ -307,7 +307,7 @@
 	return is_usb_present(chip) || is_dc_present(chip);
 }
 
-static bool is_parallel_available(struct qpnp_qg *chip)
+bool is_parallel_available(struct qpnp_qg *chip)
 {
 	if (chip->parallel_psy)
 		return true;
@@ -319,6 +319,18 @@
 	return true;
 }
 
+/* is_cp_available: resolve and cache the charge-pump psy on first use */
+bool is_cp_available(struct qpnp_qg *chip)
+{
+	if (!chip->cp_psy)
+		chip->cp_psy =
+			power_supply_get_by_name("charge_pump_master");
+
+	return chip->cp_psy != NULL;
+}
+
 bool is_parallel_enabled(struct qpnp_qg *chip)
 {
 	union power_supply_propval pval = {0, };
@@ -326,6 +338,9 @@
 	if (is_parallel_available(chip)) {
 		power_supply_get_property(chip->parallel_psy,
 			POWER_SUPPLY_PROP_CHARGING_ENABLED, &pval);
+	} else if (is_cp_available(chip)) {
+		power_supply_get_property(chip->cp_psy,
+			POWER_SUPPLY_PROP_CP_ENABLE, &pval);
 	}
 
 	return pval.intval ? true : false;
diff --git a/drivers/power/supply/qcom/qg-util.h b/drivers/power/supply/qcom/qg-util.h
index 7a1892b..ae9ef81 100644
--- a/drivers/power/supply/qcom/qg-util.h
+++ b/drivers/power/supply/qcom/qg-util.h
@@ -19,6 +19,8 @@
 bool is_dc_present(struct qpnp_qg *chip);
 bool is_input_present(struct qpnp_qg *chip);
 bool is_parallel_enabled(struct qpnp_qg *chip);
+bool is_cp_available(struct qpnp_qg *chip);
+bool is_parallel_available(struct qpnp_qg *chip);
 int qg_write_monotonic_soc(struct qpnp_qg *chip, int msoc);
 int qg_get_battery_temp(struct qpnp_qg *chip, int *batt_temp);
 int qg_get_battery_current(struct qpnp_qg *chip, int *ibat_ua);
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 4a29912..956f75f 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -2442,13 +2442,15 @@
 		"Parallel status changed Enabled=%d\n", parallel_enabled);
 
 	mutex_lock(&chip->data_lock);
-
 	/*
-	 * Parallel charger uses the same external sense, hence do not
-	 * enable SMB sensing if PMI632 is configured for external sense.
+	 * dt.qg_ext_sense = Uses external rsense, if defined do not
+	 *		     enable SMB sensing (for non-CP parallel charger).
+	 * dt.cp_iin_sns = Uses CP IIN_SNS, enable SMB sensing (for CP charger).
 	 */
-	if (!chip->dt.qg_ext_sense)
-		update_smb = true;
+	if (is_cp_available(chip))
+		update_smb = chip->dt.use_cp_iin_sns ? true : false;
+	else if (is_parallel_available(chip))
+		update_smb = chip->dt.qg_ext_sense ? false : true;
 
 	rc = process_rt_fifo_data(chip, update_smb);
 	if (rc < 0)
@@ -2699,7 +2701,8 @@
 	if ((strcmp(psy->desc->name, "battery") == 0)
 		|| (strcmp(psy->desc->name, "parallel") == 0)
 		|| (strcmp(psy->desc->name, "usb") == 0)
-		|| (strcmp(psy->desc->name, "dc") == 0)) {
+		|| (strcmp(psy->desc->name, "dc") == 0)
+		|| (strcmp(psy->desc->name, "charge_pump_master") == 0)) {
 		/*
 		 * We cannot vote for awake votable here as that takes
 		 * a mutex lock and this is executed in an atomic context.
@@ -4340,6 +4343,9 @@
 
 	chip->dt.qg_ext_sense = of_property_read_bool(node, "qcom,qg-ext-sns");
 
+	chip->dt.use_cp_iin_sns = of_property_read_bool(node,
+							"qcom,use-cp-iin-sns");
+
 	chip->dt.use_s7_ocv = of_property_read_bool(node, "qcom,qg-use-s7-ocv");
 
 	rc = of_property_read_u32(node, "qcom,min-sleep-time-secs", &temp);
diff --git a/drivers/power/supply/qcom/qpnp-smblite.c b/drivers/power/supply/qcom/qpnp-smblite.c
index 5f8c714..f0d5ece 100644
--- a/drivers/power/supply/qcom/qpnp-smblite.c
+++ b/drivers/power/supply/qcom/qpnp-smblite.c
@@ -433,9 +433,6 @@
 	case POWER_SUPPLY_PROP_SCOPE:
 		rc = smblite_lib_get_prop_scope(chg, val);
 		break;
-	case POWER_SUPPLY_PROP_FLASH_TRIGGER:
-		rc = schgm_flashlite_get_vreg_ok(chg, &val->intval);
-		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -530,6 +527,7 @@
 	POWER_SUPPLY_PROP_FCC_DELTA,
 	POWER_SUPPLY_PROP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_FLASH_TRIGGER,
+	POWER_SUPPLY_PROP_FLASH_ACTIVE,
 };
 
 static int smblite_usb_main_get_prop(struct power_supply *psy,
@@ -564,6 +562,12 @@
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
 		rc = smblite_lib_get_icl_current(chg, &val->intval);
 		break;
+	case POWER_SUPPLY_PROP_FLASH_TRIGGER:
+		rc = schgm_flashlite_get_vreg_ok(chg, &val->intval);
+		break;
+	case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+		val->intval = chg->flash_active;
+		break;
 	default:
 		pr_debug("get prop %d is not supported in usb-main\n", psp);
 		rc = -EINVAL;
@@ -582,6 +586,7 @@
 	struct smblite *chip = power_supply_get_drvdata(psy);
 	struct smb_charger *chg = &chip->chg;
 	int rc = 0;
+	union power_supply_propval pval = {0, };
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
@@ -595,6 +600,21 @@
 	case POWER_SUPPLY_PROP_CURRENT_MAX:
 		rc = smblite_lib_set_icl_current(chg, val->intval);
 		break;
+	case POWER_SUPPLY_PROP_FLASH_ACTIVE:
+		if (chg->flash_active != val->intval) {
+			chg->flash_active = val->intval;
+
+			rc = smblite_lib_get_prop_usb_present(chg, &pval);
+			if (rc < 0)
+				pr_err("Failed to get USB present status rc=%d\n",
+						rc);
+			if (!pval.intval) {
+				/* vote 100ma when usb is not present*/
+				vote(chg->usb_icl_votable, SW_ICL_MAX_VOTER,
+							true, USBIN_100UA);
+			}
+		}
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
diff --git a/drivers/soc/qcom/icnss2/main.c b/drivers/soc/qcom/icnss2/main.c
index 2cea0df..d32c795 100644
--- a/drivers/soc/qcom/icnss2/main.c
+++ b/drivers/soc/qcom/icnss2/main.c
@@ -534,6 +534,8 @@
 
 	set_bit(ICNSS_WLFW_EXISTS, &priv->state);
 	clear_bit(ICNSS_FW_DOWN, &priv->state);
+	clear_bit(ICNSS_FW_READY, &priv->state);
+
 	icnss_ignore_fw_timeout(false);
 
 	if (test_bit(ICNSS_WLFW_CONNECTED, &penv->state)) {
diff --git a/drivers/soc/qcom/icnss2/qmi.c b/drivers/soc/qcom/icnss2/qmi.c
index 225afb1..6c8d8f1 100644
--- a/drivers/soc/qcom/icnss2/qmi.c
+++ b/drivers/soc/qcom/icnss2/qmi.c
@@ -1933,12 +1933,22 @@
 
 int icnss_clear_server(struct icnss_priv *priv)
 {
+	int ret;
+
 	if (!priv)
 		return -ENODEV;
 
 	icnss_pr_info("QMI Service Disconnected: 0x%lx\n", priv->state);
 	clear_bit(ICNSS_WLFW_CONNECTED, &priv->state);
 
+	icnss_unregister_fw_service(priv);
+
+	ret =  icnss_register_fw_service(priv);
+	if (ret < 0) {
+		icnss_pr_err("WLFW server registration failed\n");
+		ICNSS_ASSERT(0);
+	}
+
 	return 0;
 }
 
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 4e52520..6754d13 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -695,8 +695,8 @@
 	attr.sched_nice = ION_KTHREAD_NICE_VAL;
 	buf = cached ? "cached" : "uncached";
 
-	thread = kthread_create(ion_sys_heap_worker, pools,
-				"ion-pool-%s-worker", buf);
+	thread = kthread_run(ion_sys_heap_worker, pools,
+			     "ion-pool-%s-worker", buf);
 	if (IS_ERR(thread)) {
 		pr_err("%s: failed to create %s worker thread: %ld\n",
 		       __func__, buf, PTR_ERR(thread));
diff --git a/drivers/thermal/msm-tsens.c b/drivers/thermal/msm-tsens.c
index c409961..d79e32f 100644
--- a/drivers/thermal/msm-tsens.c
+++ b/drivers/thermal/msm-tsens.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/err.h>
@@ -273,7 +273,7 @@
 			}
 			TSENS_DBG(tmdev, "Calling trip_temp for sensor %d\n",
 					i);
-			of_thermal_handle_trip_temp(tmdev->sensor[i].tzd, temp);
+			of_thermal_handle_trip(tmdev->sensor[i].tzd);
 		}
 	}
 	if (tmdev->min_temp_sensor_id != MIN_TEMP_DEF_OFFSET) {
@@ -284,7 +284,7 @@
 			return;
 		}
 		TSENS_DBG(tmdev, "Calling trip_temp for sensor %d\n", i);
-		of_thermal_handle_trip_temp(tmdev->min_temp.tzd, temp);
+		of_thermal_handle_trip(tmdev->min_temp.tzd);
 	}
 }
 
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 6abde83..eb06819 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -172,7 +172,8 @@
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx);
+			bool drop_rx,
+			unsigned long *flags);
 	struct device *wrapper_dev;
 	struct se_geni_rsc serial_rsc;
 	dma_addr_t tx_dma;
@@ -212,12 +213,12 @@
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx);
+			bool drop_rx, unsigned long *flags);
 static int handle_rx_hs(struct uart_port *uport,
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx);
+			bool drop_rx, unsigned long *flags);
 static unsigned int msm_geni_serial_tx_empty(struct uart_port *port);
 static int msm_geni_serial_power_on(struct uart_port *uport);
 static void msm_geni_serial_power_off(struct uart_port *uport);
@@ -236,7 +237,8 @@
 
 static struct msm_geni_serial_port msm_geni_console_port;
 static struct msm_geni_serial_port msm_geni_serial_ports[GENI_UART_NR_PORTS];
-static void msm_geni_serial_handle_isr(struct uart_port *uport);
+static void msm_geni_serial_handle_isr(struct uart_port *uport,
+				unsigned long *flags);
 
 /*
  * The below API is required to check if uport->lock (spinlock)
@@ -297,8 +299,8 @@
 	}
 }
 
-/* Disable the interrupts in order to do polling in an atomic contexts. */
-static void msm_geni_serial_disable_interrupts(struct uart_port *uport)
+/* Try disabling interrupts in order to do polling in an atomic context. */
+static bool msm_serial_try_disable_interrupts(struct uart_port *uport)
 {
 	unsigned int geni_m_irq_en, geni_s_irq_en;
 	unsigned int dma_m_irq_en, dma_s_irq_en;
@@ -309,7 +311,7 @@
 	 * by framework as we can rely on ISR.
 	 */
 	if (!msm_geni_serial_spinlocked(uport))
-		return;
+		return false;
 
 	geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
 	geni_s_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_S_IRQ_EN);
@@ -335,6 +337,8 @@
 		geni_write_reg_nolog(dma_s_irq_en, uport->membase,
 							SE_DMA_RX_IRQ_EN);
 	}
+
+	return true;
 }
 
 /*
@@ -343,17 +347,18 @@
  * in a non-atomic context. This API decides wheather to poll for
  * interrupt or depend on the isr based on in_atomic() call.
  */
-bool geni_wait_for_cmd_done(struct uart_port *uport)
+bool geni_wait_for_cmd_done(struct uart_port *uport, bool is_irq_masked)
 {
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 	unsigned long timeout = POLL_ITERATIONS;
 	unsigned long ret;
+	unsigned long flags = 0;
 
 	/*
 	 * We need to do polling if spinlock is taken
 	 * by framework as we cannot rely on ISR.
 	 */
-	if (msm_geni_serial_spinlocked(uport)) {
+	if (is_irq_masked) {
 		/*
+		 * Polling is done for 1000 iterations with
 		 * 10 usecs interval which in total accumulates
@@ -361,13 +366,13 @@
 		 */
 		if (msm_port->m_cmd) {
 			while (!msm_port->m_cmd_done && timeout > 0) {
-				msm_geni_serial_handle_isr(uport);
+				msm_geni_serial_handle_isr(uport, &flags);
 				timeout--;
 				udelay(100);
 			}
 		} else if (msm_port->s_cmd) {
 			while (!msm_port->s_cmd_done && timeout > 0) {
-				msm_geni_serial_handle_isr(uport);
+				msm_geni_serial_handle_isr(uport, &flags);
 				timeout--;
 				udelay(100);
 			}
@@ -870,7 +875,7 @@
 	 * Ensure FIFO write goes through before polling for status but.
 	 */
 	mb();
-	msm_geni_serial_disable_interrupts(uport);
+	msm_serial_try_disable_interrupts(uport);
 	msm_geni_serial_poll_tx_done(uport);
 	msm_geni_serial_enable_interrupts(uport);
 }
@@ -933,7 +938,7 @@
 		mb();
 		i += chars_to_write;
 	}
-	msm_geni_serial_disable_interrupts(uport);
+	msm_serial_try_disable_interrupts(uport);
 	msm_geni_serial_poll_tx_done(uport);
 	msm_geni_serial_enable_interrupts(uport);
 }
@@ -947,6 +952,7 @@
 	unsigned long flags;
 	unsigned int geni_status;
 	bool timeout;
+	bool is_irq_masked;
 	int irq_en;
 
 	/* Max 1 port supported as of now */
@@ -965,14 +971,19 @@
 	geni_status = readl_relaxed(uport->membase + SE_GENI_STATUS);
 
 	/* Cancel the current write to log the fault */
-	if (!locked) {
+	if ((geni_status & M_GENI_CMD_ACTIVE) && !locked) {
 		port->m_cmd_done = false;
 		port->m_cmd = true;
 		reinit_completion(&port->m_cmd_timeout);
-		msm_geni_serial_disable_interrupts(uport);
+		is_irq_masked = msm_serial_try_disable_interrupts(uport);
 		geni_cancel_m_cmd(uport->membase);
 
-		timeout = geni_wait_for_cmd_done(uport);
+		/*
+		 * console should be in polling mode. Hence directly pass true
+		 * as argument for wait_for_cmd_done here to handle cancel tx
+		 * in polling mode.
+		 */
+		timeout = geni_wait_for_cmd_done(uport, true);
 		if (timeout) {
 			IPC_LOG_MSG(port->console_log,
 				"%s: tx_cancel failed 0x%x\n",
@@ -981,7 +992,7 @@
 
 			reinit_completion(&port->m_cmd_timeout);
 			geni_abort_m_cmd(uport->membase);
-			timeout = geni_wait_for_cmd_done(uport);
+			timeout = geni_wait_for_cmd_done(uport, true);
 			if (timeout)
 				IPC_LOG_MSG(port->console_log,
 				"%s: tx abort failed 0x%x\n", __func__,
@@ -996,7 +1007,7 @@
 		/* It seems we can interrupt existing transfers unless all data
 		 * has been sent, in which case we need to look for done first.
 		 */
-		msm_geni_serial_disable_interrupts(uport);
+		msm_serial_try_disable_interrupts(uport);
 		msm_geni_serial_poll_tx_done(uport);
 		msm_geni_serial_enable_interrupts(uport);
 
@@ -1022,13 +1033,13 @@
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx)
+			bool drop_rx, unsigned long *flags)
 {
 	int i, c;
 	unsigned char *rx_char;
-	unsigned long flags;
 	struct tty_port *tport;
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
+	bool locked;
 
 	tport = &uport->state->port;
 	for (i = 0; i < rx_fifo_wc; i++) {
@@ -1062,9 +1073,14 @@
 		 * release the port lock before calling tty_flip_buffer_push()
 		 * to avoid deadlock scenarios.
 		 */
-		spin_unlock_irqrestore(&uport->lock, flags);
-		tty_flip_buffer_push(tport);
-		spin_lock_irqsave(&uport->lock, flags);
+		locked = msm_geni_serial_spinlocked(uport);
+		if (locked) {
+			spin_unlock_irqrestore(&uport->lock, *flags);
+			tty_flip_buffer_push(tport);
+			spin_lock_irqsave(&uport->lock, *flags);
+		} else {
+			tty_flip_buffer_push(tport);
+		}
 	}
 	return 0;
 }
@@ -1073,7 +1089,7 @@
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx)
+			bool drop_rx, unsigned long *flags)
 {
 	return -EPERM;
 }
@@ -1086,7 +1102,7 @@
 	struct circ_buf *xmit = &uport->state->xmit;
 	unsigned int xmit_size;
 	unsigned int dma_dbg;
-	bool timeout;
+	bool timeout, is_irq_masked;
 	int ret = 0;
 
 	xmit_size = uart_circ_chars_pending(xmit);
@@ -1117,13 +1133,13 @@
 		reinit_completion(&msm_port->m_cmd_timeout);
 
 		/*
-		 * Disabling the interrupts before giving the
+		 * Try disabling interrupts before giving the
 		 * cancel command as this might be in an atomic context.
 		 */
-		msm_geni_serial_disable_interrupts(uport);
+		is_irq_masked = msm_serial_try_disable_interrupts(uport);
 		geni_cancel_m_cmd(uport->membase);
 
-		timeout = geni_wait_for_cmd_done(uport);
+		timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
 		if (timeout) {
 			IPC_LOG_MSG(msm_port->console_log,
 			"%s: tx_cancel fail 0x%x\n", __func__,
@@ -1138,7 +1154,8 @@
 			/* Give abort command as cancel command failed */
 			geni_abort_m_cmd(uport->membase);
 
-			timeout = geni_wait_for_cmd_done(uport);
+			timeout = geni_wait_for_cmd_done(uport,
+							 is_irq_masked);
 			if (timeout) {
 				IPC_LOG_MSG(msm_port->console_log,
 				"%s: tx abort failed 0x%x\n", __func__,
@@ -1160,7 +1177,8 @@
 				geni_write_reg_nolog(1, uport->membase,
 						SE_DMA_TX_FSM_RST);
 
-				timeout = geni_wait_for_cmd_done(uport);
+				timeout = geni_wait_for_cmd_done(uport,
+							is_irq_masked);
 				if (timeout)
 					IPC_LOG_MSG(msm_port->ipc_log_misc,
 					"%s: tx fsm reset failed\n", __func__);
@@ -1243,7 +1261,7 @@
 static void stop_tx_sequencer(struct uart_port *uport)
 {
 	unsigned int geni_status;
-	bool timeout;
+	bool timeout, is_irq_masked;
 	unsigned int dma_dbg;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
@@ -1258,9 +1276,15 @@
 	port->m_cmd_done = false;
 	port->m_cmd = true;
 	reinit_completion(&port->m_cmd_timeout);
+	/*
+	 * Try to mask the interrupts before giving the
+	 * cancel command as this might be in an atomic context
+	 * from framework driver.
+	 */
+	is_irq_masked = msm_serial_try_disable_interrupts(uport);
 	geni_cancel_m_cmd(uport->membase);
 
-	timeout = geni_wait_for_cmd_done(uport);
+	timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
 	if (timeout) {
 		IPC_LOG_MSG(port->console_log, "%s: tx_cancel failed 0x%x\n",
 		__func__, geni_read_reg_nolog(uport->membase, SE_GENI_STATUS));
@@ -1271,7 +1295,7 @@
 		reinit_completion(&port->m_cmd_timeout);
 		geni_abort_m_cmd(uport->membase);
 
-		timeout = geni_wait_for_cmd_done(uport);
+		timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
 		if (timeout) {
 			IPC_LOG_MSG(port->console_log,
 				"%s: tx abort failed 0x%x\n", __func__,
@@ -1290,7 +1314,8 @@
 			geni_write_reg_nolog(1, uport->membase,
 						SE_DMA_TX_FSM_RST);
 
-			timeout = geni_wait_for_cmd_done(uport);
+			timeout = geni_wait_for_cmd_done(uport,
+							 is_irq_masked);
 			if (timeout)
 				IPC_LOG_MSG(port->ipc_log_misc,
 				"%s: tx fsm reset failed\n", __func__);
@@ -1302,6 +1327,8 @@
 			port->tx_dma = (dma_addr_t)NULL;
 		}
 	}
+	/* Unmask the interrupts once the cancel operation is done. */
+	msm_geni_serial_enable_interrupts(uport);
 	port->m_cmd = false;
 	port->xmit_size = 0;
 
@@ -1441,8 +1468,9 @@
 static void stop_rx_sequencer(struct uart_port *uport)
 {
 	unsigned int geni_status;
-	bool timeout;
+	bool timeout, is_irq_masked;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
+	unsigned long flags = 0;
 
 	IPC_LOG_MSG(port->ipc_log_misc, "%s\n", __func__);
 
@@ -1462,10 +1490,10 @@
 	IPC_LOG_MSG(port->ipc_log_misc, "%s: Start 0x%x\n",
 		    __func__, geni_status);
 	/*
-	 * Disabling the interrupts before giving the
+	 * Try disabling interrupts before giving the
 	 * cancel command as this might be in an atomic context.
 	 */
-	msm_geni_serial_disable_interrupts(uport);
+	is_irq_masked = msm_serial_try_disable_interrupts(uport);
 	geni_cancel_s_cmd(uport->membase);
 
 	/*
@@ -1473,21 +1501,35 @@
 	 * cancel control bit.
 	 */
 	mb();
-	timeout = geni_wait_for_cmd_done(uport);
+	timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
 	if (timeout) {
+		bool is_rx_active;
 		geni_status = geni_read_reg_nolog(uport->membase,
 							SE_GENI_STATUS);
+		/*
+		 * Possible that stop_rx is called from system resume context
+		 * for console usecase. In early resume, irq remains disabled
+	 * in the system. Call msm_geni_serial_handle_isr to clear
+		 * the interrupts.
+		 */
+		is_rx_active = geni_status & S_GENI_CMD_ACTIVE;
 		IPC_LOG_MSG(port->ipc_log_misc,
-			"%s cancel failed 0x%x\n",  __func__, geni_status);
+			    "%s cancel failed is_rx_active:%d 0x%x\n",
+			    __func__, is_rx_active, geni_status);
 		IPC_LOG_MSG(port->console_log,
-			"%s cancel failed 0x%x\n",  __func__, geni_status);
+			    "%s cancel failed is_rx_active:%d 0x%x\n",
+			    __func__, is_rx_active, geni_status);
+		if (uart_console(uport) && !is_rx_active) {
+			msm_geni_serial_handle_isr(uport, &flags);
+			goto exit_rx_seq;
+		}
 		port->s_cmd_done = false;
 		reinit_completion(&port->s_cmd_timeout);
 		geni_abort_s_cmd(uport->membase);
 		/* Ensure this goes through before polling. */
 		mb();
 
-		timeout = geni_wait_for_cmd_done(uport);
+		timeout = geni_wait_for_cmd_done(uport, is_irq_masked);
 		if (timeout) {
 			geni_status = geni_read_reg_nolog(uport->membase,
 							SE_GENI_STATUS);
@@ -1526,7 +1568,7 @@
 			unsigned int rx_fifo_wc,
 			unsigned int rx_last_byte_valid,
 			unsigned int rx_last,
-			bool drop_rx)
+			bool drop_rx, unsigned long *flags)
 {
 	unsigned char *rx_char;
 	struct tty_port *tport;
@@ -1558,7 +1600,8 @@
 	return ret;
 }
 
-static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx)
+static int msm_geni_serial_handle_rx(struct uart_port *uport, bool drop_rx,
+				     unsigned long *flags)
 {
 	int ret = 0;
 	unsigned int rx_fifo_status;
@@ -1577,7 +1620,7 @@
 	rx_last = rx_fifo_status & RX_LAST;
 	if (rx_fifo_wc)
 		ret = port->handle_rx(uport, rx_fifo_wc, rx_last_byte_valid,
-							rx_last, drop_rx);
+						rx_last, drop_rx, flags);
 	return ret;
 }
 
@@ -1759,7 +1802,8 @@
 	return 0;
 }
 
-static void msm_geni_serial_handle_isr(struct uart_port *uport)
+static void msm_geni_serial_handle_isr(struct uart_port *uport,
+				       unsigned long *flags)
 {
 	unsigned int m_irq_status;
 	unsigned int s_irq_status;
@@ -1833,13 +1877,20 @@
 				"%s.sirq 0x%x break:%d\n",
 				__func__, s_irq_status, uport->icount.brk);
 		}
-
-		if (s_irq_status & (S_CMD_CANCEL_EN | S_CMD_ABORT_EN))
+		/*
+		 * In case of stop_rx handling there is a chance
+		 * that RX data can come in parallel. Set drop_rx to
+		 * avoid data push to framework from handle_rx_console()
+		 * API for stop_rx case.
+		 */
+		if (s_irq_status & (S_CMD_CANCEL_EN | S_CMD_ABORT_EN)) {
 			s_cmd_done = true;
+			drop_rx = true;
+		}
 
 		if (s_irq_status & (S_RX_FIFO_WATERMARK_EN |
 							S_RX_FIFO_LAST_EN))
-			msm_geni_serial_handle_rx(uport, drop_rx);
+			msm_geni_serial_handle_rx(uport, drop_rx, flags);
 	} else {
 		dma_tx_status = geni_read_reg_nolog(uport->membase,
 							SE_DMA_TX_IRQ_STAT);
@@ -1933,7 +1984,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&uport->lock, flags);
-	msm_geni_serial_handle_isr(uport);
+	msm_geni_serial_handle_isr(uport, &flags);
 	spin_unlock_irqrestore(&uport->lock, flags);
 	return IRQ_HANDLED;
 }
@@ -2102,7 +2153,7 @@
 		 * it else we could end up in data loss scenarios.
 		 */
 		msm_port->xfer_mode = FIFO_MODE;
-		msm_geni_serial_disable_interrupts(uport);
+		msm_serial_try_disable_interrupts(uport);
 		msm_geni_serial_poll_tx_done(uport);
 		msm_geni_serial_enable_interrupts(uport);
 		se_get_packing_config(8, 1, false, &cfg0, &cfg1);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 13ea68d..32a7bd8 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -2331,8 +2331,14 @@
 			DWC3_DEVTEN_USBRSTEN |
 			DWC3_DEVTEN_DISCONNEVTEN);
 
+	/*
+	 * Enable SUSPENDEVENT(BIT:6) for version 230A and above
+	 * else enable USB Link change event (BIT:3) for older versions
+	 */
 	if (dwc->revision < DWC3_REVISION_230A)
 		reg |= DWC3_DEVTEN_ULSTCNGEN;
+	else
+		reg |= DWC3_DEVTEN_EOPFEN;
 
 	dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
 }
@@ -3400,13 +3406,6 @@
 	speed = reg & DWC3_DSTS_CONNECTSPD;
 	dwc->speed = speed;
 
-	/* Enable SUSPENDEVENT(BIT:6) for version 230A and above */
-	if (dwc->revision >= DWC3_REVISION_230A) {
-		reg = dwc3_readl(dwc->regs, DWC3_DEVTEN);
-		reg |= DWC3_DEVTEN_EOPFEN;
-		dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
-	}
-
 	/* Reset the retry on erratic error event count */
 	dwc->retries_on_error = 0;
 
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index ae04cd8..7ed9cf7 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -139,6 +139,7 @@
 
 	struct ffs_data			*ffs;
 	struct ffs_ep			*ep;	/* P: ffs->eps_lock */
+	atomic_t			opened;
 
 	struct dentry			*dentry;
 
@@ -205,7 +206,7 @@
 	unsigned char			in;	/* P: ffs->eps_lock */
 	unsigned char			isoc;	/* P: ffs->eps_lock */
 
-	unsigned char			_pad;
+	bool				invalid;
 };
 
 struct ffs_buffer {
@@ -956,6 +957,16 @@
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 
+		/*
+		 * epfile->invalid is set when EPs are disabled. Userspace
+		 * might have stale threads continuing to do I/O and may be
+		 * unaware of that especially if we block here. Instead return
+		 * an error immediately here and don't allow any more I/O
+		 * until the epfile is reopened.
+		 */
+		if (epfile->invalid)
+			return -ENODEV;
+
 		ret = wait_event_interruptible(
 				epfile->ffs->wait, (ep = epfile->ep));
 		if (ret)
@@ -1152,15 +1163,16 @@
 
 	ENTER();
 
-	ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
-		epfile->ffs->state, epfile->ffs->setup_state,
-		epfile->ffs->flags);
+	ffs_log("%s: state %d setup_state %d flag %lu opened %u",
+		epfile->name, epfile->ffs->state, epfile->ffs->setup_state,
+		epfile->ffs->flags, atomic_read(&epfile->opened));
 
 	if (WARN_ON(epfile->ffs->state != FFS_ACTIVE))
 		return -ENODEV;
 
 	file->private_data = epfile;
 	ffs_data_opened(epfile->ffs);
+	atomic_inc(&epfile->opened);
 
 	return 0;
 }
@@ -1301,9 +1313,12 @@
 	ENTER();
 
 	__ffs_epfile_read_buffer_free(epfile);
-	ffs_log("%s: state %d setup_state %d flag %lu", epfile->name,
-			epfile->ffs->state, epfile->ffs->setup_state,
-			epfile->ffs->flags);
+	ffs_log("%s: state %d setup_state %d flag %lu opened %u",
+		epfile->name, epfile->ffs->state, epfile->ffs->setup_state,
+		epfile->ffs->flags, atomic_read(&epfile->opened));
+
+	if (atomic_dec_and_test(&epfile->opened))
+		epfile->invalid = false;
 
 	ffs_data_closed(epfile->ffs);
 
@@ -1333,6 +1348,10 @@
 		if (file->f_flags & O_NONBLOCK)
 			return -EAGAIN;
 
+		/* don't allow any I/O until file is reopened */
+		if (epfile->invalid)
+			return -ENODEV;
+
 		ret = wait_event_interruptible(
 				epfile->ffs->wait, (ep = epfile->ep));
 		if (ret)
@@ -1998,6 +2017,8 @@
 			ffs_epfiles_destroy(epfiles, i - 1);
 			return -ENOMEM;
 		}
+
+		atomic_set(&epfile->opened, 0);
 	}
 
 	ffs->epfiles = epfiles;
@@ -2045,6 +2066,7 @@
 		++ep;
 
 		if (epfile) {
+			epfile->invalid = true; /* until file is reopened */
 			epfile->ep = NULL;
 			__ffs_epfile_read_buffer_free(epfile);
 			++epfile;
diff --git a/include/linux/i2c-msm-v2.h b/include/linux/i2c-msm-v2.h
new file mode 100644
index 0000000..6aac342
--- /dev/null
+++ b/include/linux/i2c-msm-v2.h
@@ -0,0 +1,664 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2014-2015,2017-2020, The Linux Foundation. All rights reserved.
+ */
+
+/*
+ * I2C controller driver for Qualcomm Technologies Inc platforms
+ */
+
+#ifndef _I2C_MSM_V2_H
+#define _I2C_MSM_V2_H
+
+#include <linux/bitops.h>
+#include <linux/dmaengine.h>
+
+enum msm_i2_debug_level {
+	MSM_ERR,	/* Error messages only. Always on */
+	MSM_PROF,	/* High level events. Use for profiling */
+	MSM_DBG,	/* Low level details. Use for debugging */
+};
+
+#define i2c_msm_dbg(ctrl, dbg_level, fmt, ...) do {\
+		if (ctrl->dbgfs.dbg_lvl >= dbg_level)\
+			dev_info(ctrl->dev, fmt, ##__VA_ARGS__);\
+	} while (0)
+
+#define BIT_IS_SET(val, idx)        ((val >> idx) & 0x1)
+#define BITS_AT(val, idx, n_bits)(((val) & (((1 << n_bits) - 1) << idx)) >> idx)
+#define MASK_IS_SET(val, mask)      ((val & mask) == mask)
+#define MASK_IS_SET_BOOL(val, mask) (MASK_IS_SET(val, mask) ? 1 : 0)
+#define KHz(freq) (1000 * freq)
+#define I2C_MSM_CLK_FAST_PLUS_FREQ  (1000000)
+
+/* QUP Registers */
+enum {
+	QUP_CONFIG              = 0x0,
+	QUP_STATE               = 0x4,
+	QUP_IO_MODES            = 0x8,
+	QUP_SW_RESET            = 0xC,
+	QUP_OPERATIONAL         = 0x18,
+	QUP_ERROR_FLAGS         = 0x1C,
+	QUP_ERROR_FLAGS_EN      = 0x20,
+	QUP_TEST_CTRL           = 0x24,
+	QUP_OPERATIONAL_MASK    = 0x28,
+	QUP_HW_VERSION          = 0x30,
+	QUP_MX_READ_COUNT       = 0x208,
+	QUP_MX_WRITE_COUNT      = 0x150,
+	QUP_MX_OUTPUT_COUNT     = 0x100,
+	QUP_MX_INPUT_COUNT      = 0x200,
+	QUP_MX_WR_CNT           = 0x100,
+	QUP_OUT_DEBUG           = 0x108,
+	QUP_OUT_FIFO_CNT        = 0x10C,
+	QUP_OUT_FIFO_BASE       = 0x110,
+	QUP_IN_READ_CUR         = 0x20C,
+	QUP_IN_DEBUG            = 0x210,
+	QUP_IN_FIFO_CNT         = 0x214,
+	QUP_IN_FIFO_BASE        = 0x218,
+	QUP_I2C_MASTER_CLK_CTL  = 0x400,
+	QUP_I2C_STATUS          = 0x404,
+	QUP_I2C_MASTER_CONFIG   = 0x408,
+	QUP_I2C_MASTER_BUS_CLR  = 0x40C,
+};
+
+/* Register:QUP_STATE state field values */
+enum i2c_msm_qup_state {
+	QUP_STATE_RESET         = 0,
+	QUP_STATE_RUN           = 1U,
+	QUP_STATE_PAUSE         = 3U,
+};
+
+/* Register:QUP_STATE fields */
+enum {
+	QUP_STATE_MASK          = 3U,
+	QUP_STATE_VALID         = BIT(2),
+	QUP_I2C_MAST_GEN        = BIT(4),
+	QUP_I2C_FLUSH           = BIT(6),
+	QUP_I2C_STATUS_RESET    = 0x42,
+};
+
+
+/* Register:QUP_CONFIG fields */
+enum {
+	QUP_MINI_CORE_MASK      = 0xF00,
+	QUP_MINI_CORE_I2C_VAL   = 0x200,
+	QUP_N_MASK              = 0x1F,
+	QUP_N_VAL               = 0x7, /* 0xF for A family */
+	QUP_NO_OUTPUT           = BIT(6),
+	QUP_NO_INPUT            = BIT(7),
+	QUP_APP_CLK_ON_EN       = BIT(12),
+	QUP_CORE_CLK_ON_EN      = BIT(13),
+	QUP_FIFO_CLK_GATE_EN    = BIT(14),
+};
+
+/* Register:QUP_OPERATIONAL fields */
+enum {
+	QUP_INPUT_FIFO_NOT_EMPTY = BIT(5),
+	QUP_OUTPUT_SERVICE_FLAG  = BIT(8),
+	QUP_INPUT_SERVICE_FLAG   = BIT(9),
+	QUP_MAX_OUTPUT_DONE_FLAG = BIT(10),
+	QUP_MAX_INPUT_DONE_FLAG  = BIT(11),
+	QUP_OUT_BLOCK_WRITE_REQ  = BIT(12),
+	QUP_IN_BLOCK_READ_REQ    = BIT(13),
+};
+
+/* Register:QUP_OPERATIONAL_MASK fields */
+enum {
+	QUP_INPUT_SERVICE_MASK  = BIT(9),
+	QUP_OUTPUT_SERVICE_MASK = BIT(8),
+};
+
+/* Register:QUP_IO_MODES fields */
+enum {
+	QUP_OUTPUT_MODE         = BIT(10) | BIT(11),
+	QUP_INPUT_MODE          = BIT(12) | BIT(13),
+	QUP_UNPACK_EN           = BIT(14),
+	QUP_PACK_EN             = BIT(15),
+	QUP_OUTPUT_BIT_SHIFT_EN = BIT(16),
+};
+
+/* Register:QUP_I2C_STATUS (a.k.a I2C_MASTER_STATUS) fields */
+enum {
+	QUP_BUS_ERROR           = BIT(2),
+	QUP_PACKET_NACKED       = BIT(3),
+	QUP_ARB_LOST            = BIT(4),
+	QUP_INVALID_WRITE       = BIT(5),
+	QUP_FAILED              = BIT(6),
+	QUP_BUS_ACTIVE          = BIT(8),
+	QUP_BUS_MASTER          = BIT(9),
+	QUP_INVALID_TAG         = BIT(23),
+	QUP_INVALID_READ_ADDR   = BIT(24),
+	QUP_INVALID_READ_SEQ    = BIT(25),
+	QUP_I2C_SDA             = BIT(26),
+	QUP_I2C_SCL             = BIT(27),
+	QUP_MSTR_STTS_ERR_MASK  = 0x380003C,
+};
+
+/* Register:QUP_I2C_MASTER_CONFIG fields */
+enum {
+	QUP_EN_VERSION_TWO_TAG  = 1U,
+};
+
+/* Register:QUP_I2C_MASTER_CLK_CTL field setters */
+#define I2C_MSM_SCL_NOISE_REJECTION(reg_val, noise_rej_val) \
+		(((reg_val) & ~(0x3 << 24)) | (((noise_rej_val) & 0x3) << 24))
+#define I2C_MSM_SDA_NOISE_REJECTION(reg_val, noise_rej_val) \
+		(((reg_val) & ~(0x3 << 26)) | (((noise_rej_val) & 0x3) << 26))
+
+/* Register:QUP_ERROR_FLAGS_EN flags */
+enum {
+	QUP_OUTPUT_OVER_RUN_ERR_EN  = BIT(5),
+	QUP_INPUT_UNDER_RUN_ERR_EN  = BIT(4),
+	QUP_OUTPUT_UNDER_RUN_ERR_EN = BIT(3),
+	QUP_INPUT_OVER_RUN_ERR_EN   = BIT(2),
+};
+
+/* Status, Error flags */
+enum {
+	I2C_STATUS_WR_BUFFER_FULL  = BIT(0),
+	I2C_STATUS_BUS_ACTIVE      = BIT(8),
+	I2C_STATUS_BUS_MASTER      = BIT(9),
+	I2C_STATUS_ERROR_MASK      = 0x38000FC,
+	QUP_I2C_NACK_FLAG          = BIT(3),
+	QUP_IN_NOT_EMPTY           = BIT(5),
+	QUP_ERR_FLGS_MASK           = 0x3C,
+};
+
+/* Master status clock states */
+enum {
+	I2C_CLK_RESET_BUSIDLE_STATE = 0,
+	I2C_CLK_FORCED_LOW_STATE    = 5,
+};
+
+/* Controller's power state */
+enum i2c_msm_power_state {
+	I2C_MSM_PM_RT_ACTIVE,
+	I2C_MSM_PM_RT_SUSPENDED,
+	I2C_MSM_PM_SYS_SUSPENDED
+};
+
+/*
+ * The max buffer size required for tags is for holding the following sequence:
+ * [start] + [start | slv-addr] + [ rd/wr | len]
+ * which sum up to 6 bytes. However, we use u64 to hold the value, thus we say
+ * that max length is 8 bytes.
+ */
+#define I2C_MSM_TAG2_MAX_LEN            (4)
+#define I2C_MSM_DMA_TX_SZ             (64) /* tx chan n entries */
+#define I2C_MSM_DMA_RX_SZ             (32) /* rx chan n entries */
+#define I2C_MSM_DMA_DESC_ARR_SIZ  (I2C_MSM_DMA_TX_SZ + I2C_MSM_DMA_RX_SZ)
+#define I2C_MSM_REG_2_STR_BUF_SZ        (128)
+/* Optimal value to hold the error strings */
+#define I2C_MSM_MAX_ERR_BUF_SZ		(256)
+#define I2C_MSM_BUF_DUMP_MAX_BC         (20)
+#define I2C_MSM_MAX_POLL_MSEC           (100)
+#define I2C_MSM_TIMEOUT_SAFETY_COEF     (10)
+#define I2C_MSM_TIMEOUT_MIN_USEC        (500000)
+
+/* QUP v2 tags */
+#define QUP_TAG2_DATA_WRITE        (0x82ULL)
+#define QUP_TAG2_DATA_WRITE_N_STOP (0x83ULL)
+#define QUP_TAG2_DATA_READ         (0x85ULL)
+#define QUP_TAG2_DATA_READ_N_STOP  (0x87ULL)
+#define QUP_TAG2_START             (0x81ULL)
+#define QUP_TAG2_DATA_READ_N_NACK  (0x86ULL)
+#define QUP_TAG2_START_STOP        (0x8AULL)
+#define QUP_TAG2_INPUT_EOT         (0x93ULL)
+#define QUP_TAG2_FLUSH_STOP        (0x96ULL)
+#define QUP_BUF_OVERHD_BC          (2)
+#define QUP_MAX_BUF_SZ             (256)
+
+enum i2c_msm_clk_path_vec_idx {
+	I2C_MSM_CLK_PATH_SUSPEND_VEC,
+	I2C_MSM_CLK_PATH_RESUME_VEC,
+};
+#define I2C_MSM_CLK_PATH_AVRG_BW(ctrl) (0)
+/*
+ * Reducing the frequency by 1 to make sure it is less than 19.2MHz
+ * so that we don't need RPM ack to unvote which will work only if vote
+ * is less than or equal to 19.2MHz. To be on the safe side we are decreasing
+ * frequency by 1.
+ */
+#define I2C_MSM_CLK_PATH_BRST_BW(ctrl) ((ctrl->rsrcs.clk_freq_in - 1) * 4)
+
+enum i2c_msm_gpio_name_idx {
+	I2C_MSM_GPIO_SCL,
+	I2C_MSM_GPIO_SDA,
+};
+
+extern const char * const i2c_msm_mode_str_tbl[];
+
+struct i2c_msm_ctrl;
+
+/*
+ *  i2c_msm_dma_mem: utility struct which holds both physical and virtual addr
+ */
+struct i2c_msm_dma_mem {
+	dma_addr_t               phy_addr;
+	void                    *vrtl_addr;
+};
+
+/*
+ * i2c_msm_tag: tag's data and its length.
+ *
+ * @len tag len can be two, four or six bytes.
+ */
+struct i2c_msm_tag {
+	u64                    val;
+	int                    len;
+};
+
+/*
+ * i2c_msm_dma_tag: similar to struct i2c_msm_tag but holds physical address.
+ *
+ * @buf physical address of entry in the tag_arr of
+ *          struct i2c_msm_xfer_mode_dma
+ * @len tag len.
+ *
+ * Hold the information from i2c_msm_dma_xfer_prepare() which is used by
+ * i2c_msm_dma_xfer_process() and freed by i2c_msm_dma_xfer_unprepare()
+ */
+struct i2c_msm_dma_tag {
+	dma_addr_t             buf;
+	size_t                 len;
+};
+
+/*
+ * i2c_msm_dma_buf: dma mapped pointer to i2c_msg data buffer and related tag
+ * @vir_addr ptr to i2c_msg buf beginning or with offset (when buf len > 256)
+ */
+struct i2c_msm_dma_buf {
+	struct i2c_msm_dma_mem   ptr;
+	enum dma_data_direction  dma_dir;
+	size_t                   len;
+	bool                     is_rx;
+	bool                     is_last;
+	struct i2c_msm_dma_tag   tag;
+	/* DMA API */
+	struct scatterlist	sg_list[2];
+};
+
+/*
+ * i2c_msm_dma_chan: per channel info
+ *
+ * @is_init true when the channel is initialized and requires eventual teardown.
+ * @name channel name (tx/rx) for debugging.
+ * @desc_cnt_cur number of occupied descriptors
+ */
+struct i2c_msm_dma_chan {
+	bool                     is_init;
+	const char              *name;
+	size_t                   desc_cnt_cur;
+	struct dma_chan         *dma_chan;
+	enum dma_transfer_direction dir;
+};
+
+enum i2c_msm_dma_chan_dir {
+	I2C_MSM_DMA_TX,
+	I2C_MSM_DMA_RX,
+	I2C_MSM_DMA_CNT,
+};
+
+enum i2c_msm_dma_state {
+	I2C_MSM_DMA_INIT_NONE, /* Uninitialized  DMA */
+	I2C_MSM_DMA_INIT_CORE, /* Core init not channels, memory Allocated */
+	I2C_MSM_DMA_INIT_CHAN, /* Both Core and channels are init */
+};
+
+/*
+ * struct i2c_msm_xfer_mode_dma: DMA mode configuration and work space
+ *
+ * @state   specifies the DMA core and channel initialization states.
+ * @buf_arr_cnt current number of valid buffers in buf_arr. The valid buffers
+ *          are at index 0..buf_arr_cnt excluding buf_arr_cnt.
+ * @buf_arr array of descriptors which point to the user's buffer
+ *     virtual and physical address, and hold meta data about the buffer
+ *     and respective tag.
+ * @tag_arr array of tags in DMAable memory. Holds a tag per buffer of the same
+ *          index, that is tag_arr[i] is related to buf_arr[i]. Also, tag_arr[i]
+ *          is queued in the tx channel just before buf_arr[i] is queued in
+ *          the tx (output buf) or rx channel (input buffer).
+ * @eot_n_flush_stop_tags EOT and flush-stop tags to be queued to the tx
+ *          DMA channel after the last transfer when it is a read.
+ * @input_tag hw is placing input tags in the rx channel on read operations.
+ *          The value of these tags is "don't care" from DMA transfer
+ *          perspective. Thus, this single buffer is used for all the input
+ *          tags. The field is used as write only.
+ */
+struct i2c_msm_xfer_mode_dma {
+	enum i2c_msm_dma_state   state;
+	size_t                   buf_arr_cnt;
+	struct i2c_msm_dma_buf   buf_arr[I2C_MSM_DMA_DESC_ARR_SIZ];
+	struct i2c_msm_dma_mem   tag_arr;
+	struct i2c_msm_dma_mem   eot_n_flush_stop_tags;
+	struct i2c_msm_dma_mem   input_tag;
+	struct i2c_msm_dma_chan  chan[I2C_MSM_DMA_CNT];
+};
+
+/*
+ * I2C_MSM_DMA_TAG_MEM_SZ includes the following fields of
+ * struct i2c_msm_xfer_mode_dma (in order):
+ *
+ * Buffer of DMA memory:
+ * +-----------+---------+-----------+-----------+----+-----------+
+ * | input_tag | eot_... | tag_arr 0 | tag_arr 1 | .. | tag_arr n |
+ * +-----------+---------+-----------+-----------+----+-----------+
+ *
+ * Why +2?
+ * One tag buffer for the input tags. This is a write only buffer for DMA, it is
+ *    used to read the tags of the input fifo. We let them overwrite each other,
+ *    since it is a throw-away from the driver's perspective.
+ * Second tag buffer for the EOT and flush-stop tags. This is a read only
+ *    buffer (from DMA perspective). It is used to put EOT and flush-stop at the
+ *    end of every transaction.
+ */
+#define I2C_MSM_DMA_TAG_MEM_SZ  \
+	((I2C_MSM_DMA_DESC_ARR_SIZ + 2) * I2C_MSM_TAG2_MAX_LEN)
+
+/*
+ * i2c_msm_xfer_mode_fifo: operations and state of FIFO mode
+ *
+ * Holds the FIFO sizes and the driver's current input/output positions for
+ * FIFO-mode transfers (state only; this struct has no operations member).
+ * @input_fifo_sz input fifo size in bytes
+ * @output_fifo_sz output fifo size in bytes
+ * @in_rem  remaining u32 entries in input FIFO before empty
+ * @out_rem remaining u32 entries in output FIFO before full
+ * @out_buf buffer for collecting bytes to four bytes groups (u32) before
+ *          writing them to the output fifo.
+ * @out_buf_idx next free index in out_buf. 0..3
+ */
+struct i2c_msm_xfer_mode_fifo {
+	size_t                   input_fifo_sz;
+	size_t                   output_fifo_sz;
+	size_t                   in_rem;
+	size_t                   out_rem;
+	u8                       out_buf[4];
+	int                      out_buf_idx;
+};
+
+/* i2c_msm_xfer_mode_blk: operations and state of Block mode
+ *
+ * @is_init when true, struct is initialized and requires mem free on exit
+ * @in_blk_sz size of input/rx block
+ * @out_blk_sz size of output/tx block
+ * @tx_cache internal buffer to store tx data
+ * @rx_cache internal buffer to store rx data
+ * @rx_cache_idx points to the next unread index in rx cache
+ * @tx_cache_idx points to the next unwritten index in tx cache
+ * @wait_rx_blk completion object to wait on for end of blk rx transfer.
+ * @wait_tx_blk completion object to wait on for end of blk tx transfer.
+ * @complete_mask applied to QUP_OPERATIONAL to determine when blk
+ *  xfer is complete.
+ */
+struct i2c_msm_xfer_mode_blk {
+	bool                     is_init;
+	size_t                   in_blk_sz;
+	size_t                   out_blk_sz;
+	u8                       *tx_cache;
+	u8                       *rx_cache;
+	int                      rx_cache_idx;
+	int                      tx_cache_idx;
+	struct completion        wait_rx_blk;
+	struct completion        wait_tx_blk;
+	u32                      complete_mask;
+};
+
+/* INPUT_MODE and OUTPUT_MODE fields of QUP_IO_MODES register */
+enum i2c_msm_xfer_mode_id {
+	I2C_MSM_XFER_MODE_FIFO,
+	I2C_MSM_XFER_MODE_BLOCK,
+	I2C_MSM_XFER_MODE_DMA,
+	I2C_MSM_XFER_MODE_NONE, /* keep last as a counter */
+};
+
+
+struct i2c_msm_dbgfs {
+	struct dentry             *root;
+	enum msm_i2_debug_level    dbg_lvl;
+	enum i2c_msm_xfer_mode_id  force_xfer_mode;
+};
+
+/*
+ * qup_i2c_clk_path_vote: data to use bus scaling driver for clock path vote
+ *
+ * @mstr_id master id number of the i2c core or its wrapper (BLSP/GSBI).
+ *       When zero, clock path voting is disabled.
+ * @client_hdl when zero, client is not registered with the bus scaling driver,
+ *      and bus scaling functionality should not be used. When non zero, it
+ *      is a bus scaling client id and may be used to vote for clock path.
+ * @reg_err when true, registration error was detected and an error message was
+ *      logged. i2c will attempt to re-register but will log error only once.
+ *      Once registration succeeds, the flag is set to false.
+ * @actv_only when set, votes when system active and removes the vote when
+ *       system goes idle (optimises for performance). When unset, voting using
+ *       runtime pm (optimizes for power).
+ */
+struct qup_i2c_clk_path_vote {
+	u32                         mstr_id;
+	u32                         client_hdl;
+	struct msm_bus_scale_pdata *pdata;
+	bool                        reg_err;
+	bool                        actv_only;
+};
+
+/*
+ * i2c_msm_resources: OS resources
+ *
+ * @mem  I2C controller memory resource from platform data.
+ * @base I2C controller virtual base address
+ * @clk_freq_in core clock frequency in Hz
+ * @clk_freq_out bus clock frequency in Hz
+ */
+struct i2c_msm_resources {
+	struct resource             *mem;
+	void __iomem                *base; /* virtual */
+	struct clk                  *core_clk;
+	struct clk                  *iface_clk;
+	int                          clk_freq_in;
+	int                          clk_freq_out;
+	struct qup_i2c_clk_path_vote clk_path_vote;
+	int                          irq;
+	bool                         disable_dma;
+	struct pinctrl              *pinctrl;
+	struct pinctrl_state        *gpio_state_active;
+	struct pinctrl_state        *gpio_state_suspend;
+};
+
+#define I2C_MSM_PINCTRL_ACTIVE       "i2c_active"
+#define I2C_MSM_PINCTRL_SUSPEND      "i2c_sleep"
+
+/*
+ * i2c_msm_xfer_buf: current xfer position and preprocessed tags
+ *
+ * @is_init the buf is marked initialized by the first call to
+ *          i2c_msm_xfer_next_buf()
+ * @msg_idx   index of the message that the buffer is pointing to
+ * @byte_idx  index of first byte in the current buffer
+ * @end_idx   count of bytes processed from the current message. This value
+ *            is compared against len to find out if buffer is done processing.
+ * @len       number of bytes in current buffer.
+ * @is_rx when true, current buffer is pointing to a i2c read operation.
+ * @slv_addr 8 bit address. This is the i2c_msg->addr + rd/wr bit.
+ *
+ * Keep track of current position in the client's transfer request and
+ * pre-process a transfer's buffer and tags.
+ */
+struct i2c_msm_xfer_buf {
+	bool                       is_init;
+	int                        msg_idx;
+	int                        byte_idx;
+	int                        end_idx;
+	int                        len;
+	bool                       is_rx;
+	bool                       is_last;
+	u16                        slv_addr;
+	struct i2c_msm_tag         in_tag;
+	struct i2c_msm_tag         out_tag;
+};
+
+#ifdef DEBUG
+#define I2C_MSM_PROF_MAX_EVNTS   (64)
+#else
+#define I2C_MSM_PROF_MAX_EVNTS   (16)
+#endif
+
+/*
+ * i2c_msm_prof_event: profiling event
+ *
+ * @data Additional data about the event. The interpretation of the data is
+ *       dependent on the type field.
+ * @type event type (see enum i2c_msm_prof_event_type)
+ */
+struct i2c_msm_prof_event {
+	struct timespec time;
+	u64             data0;
+	u32             data1;
+	u32             data2;
+	u8              type;
+	u8              dump_func_id;
+};
+
+enum i2c_msm_err {
+	I2C_MSM_NO_ERR = 0,
+	I2C_MSM_ERR_NACK,
+	I2C_MSM_ERR_ARB_LOST,
+	I2C_MSM_ERR_BUS_ERR,
+	I2C_MSM_ERR_TIMEOUT,
+	I2C_MSM_ERR_CORE_CLK,
+	I2C_MSM_ERR_OVR_UNDR_RUN,
+};
+
+/*
+ * i2c_msm_xfer: A client transfer request. A list of one or more i2c messages
+ *
+ * @msgs         NULL when no active xfer. Points to array of i2c_msgs
+ *               given by the client.
+ * @msg_cnt      number of messages in msgs array.
+ * @complete     completion object to wait on for end of transfer.
+ * @rx_cnt       number of input  bytes in the client's request.
+ * @tx_cnt       number of output bytes in the client's request.
+ * @rx_ovrhd_cnt number of input  bytes due to tags.
+ * @tx_ovrhd_cnt number of output bytes due to tags.
+ * @event        profiling data. An array of timestamps of transfer events
+ * @event_cnt    number of items in event array.
+ * @is_active    true during xfer process and false after xfer end
+ * @mtx          mutex to solve multithreaded problem in xfer
+ */
+struct i2c_msm_xfer {
+	struct i2c_msg            *msgs;
+	int                        msg_cnt;
+	enum i2c_msm_xfer_mode_id  mode_id;
+	struct completion          complete;
+	struct completion          rx_complete;
+	size_t                     rx_cnt;
+	size_t                     tx_cnt;
+	size_t                     rx_ovrhd_cnt;
+	size_t                     tx_ovrhd_cnt;
+	struct i2c_msm_xfer_buf    cur_buf;
+	u32                        timeout;
+	bool                       last_is_rx;
+	enum i2c_msm_err           err;
+	struct i2c_msm_prof_event  event[I2C_MSM_PROF_MAX_EVNTS];
+	atomic_t                   event_cnt;
+	atomic_t                   is_active;
+	struct mutex               mtx;
+	struct i2c_msm_xfer_mode_fifo	fifo;
+	struct i2c_msm_xfer_mode_blk	blk;
+	struct i2c_msm_xfer_mode_dma	dma;
+};
+
+/*
+ * i2c_msm_ctrl: the driver's main struct
+ *
+ * @dev      the controller's device handle
+ * @adapter  i2c_adapter instance registered with the i2c framework
+ * @pwr_state current power state of the controller
+ * @xfer     state of the currently processed transfer.
+ * @dbgfs    debug-fs root and values that may be set via debug-fs.
+ * @rsrcs    resources from platform data including clocks, gpios, irqs, and
+ *           memory regions.
+ * @mstr_clk_ctl cached value for programming to mstr_clk_ctl register
+ * @i2c_sts_reg	 status of QUP_I2C_MASTER_STATUS register.
+ * @qup_op_reg	 status of QUP_OPERATIONAL register.
+ */
+struct i2c_msm_ctrl {
+	struct device             *dev;
+	struct i2c_adapter         adapter;
+	struct i2c_msm_xfer        xfer;
+	struct i2c_msm_dbgfs       dbgfs;
+	struct i2c_msm_resources   rsrcs;
+	u32                        mstr_clk_ctl;
+	u32			   i2c_sts_reg;
+	u32			   qup_op_reg;
+	enum i2c_msm_power_state   pwr_state;
+};
+
+/* Enum for the profiling event types */
+enum i2c_msm_prof_evnt_type {
+	I2C_MSM_VALID_END,
+	I2C_MSM_PIP_DSCN,
+	I2C_MSM_PIP_CNCT,
+	I2C_MSM_ACTV_END,
+	I2C_MSM_IRQ_BGN,
+	I2C_MSM_IRQ_END,
+	I2C_MSM_XFER_BEG,
+	I2C_MSM_XFER_END,
+	I2C_MSM_SCAN_SUM,
+	I2C_MSM_NEXT_BUF,
+	I2C_MSM_COMPLT_OK,
+	I2C_MSM_COMPLT_FL,
+	I2C_MSM_PROF_RESET,
+};
+
+#ifdef CONFIG_I2C_MSM_PROF_DBG
+void i2c_msm_dbgfs_init(struct i2c_msm_ctrl *ctrl);
+
+void i2c_msm_dbgfs_teardown(struct i2c_msm_ctrl *ctrl);
+
+/* diagnose the i2c registers and dump the errors accordingly */
+const char *i2c_msm_dbg_tag_to_str(const struct i2c_msm_tag *tag,
+						char *buf, size_t buf_len);
+
+void i2c_msm_prof_evnt_dump(struct i2c_msm_ctrl *ctrl);
+
+/* function definitions to be used from the i2c-msm-v2-debug file */
+void i2c_msm_prof_evnt_add(struct i2c_msm_ctrl *ctrl,
+				enum msm_i2_debug_level dbg_level,
+				enum i2c_msm_prof_evnt_type event,
+				u64 data0, u32 data1, u32 data2);
+
+int i2c_msm_dbg_qup_reg_dump(struct i2c_msm_ctrl *ctrl);
+
+const char *
+i2c_msm_dbg_dma_tag_to_str(const struct i2c_msm_dma_tag *dma_tag, char *buf,
+								size_t buf_len);
+#else
+/* use dummy functions */
+static inline void i2c_msm_dbgfs_init(struct i2c_msm_ctrl *ctrl) {}
+static inline void i2c_msm_dbgfs_teardown(struct i2c_msm_ctrl *ctrl) {}
+
+static inline const char *i2c_msm_dbg_tag_to_str(const struct i2c_msm_tag *tag,
+						char *buf, size_t buf_len)
+{
+	return NULL;
+}
+static inline void i2c_msm_prof_evnt_dump(struct i2c_msm_ctrl *ctrl) {}
+
+/* function definitions to be used from the i2c-msm-v2-debug file */
+static inline void i2c_msm_prof_evnt_add(struct i2c_msm_ctrl *ctrl,
+				enum msm_i2_debug_level dbg_level,
+				enum i2c_msm_prof_evnt_type event,
+				u64 data0, u32 data1, u32 data2) {}
+
+static inline int i2c_msm_dbg_qup_reg_dump(struct i2c_msm_ctrl *ctrl)
+{
+	return true;
+}
+static inline const char *i2c_msm_dbg_dma_tag_to_str(const struct
+			i2c_msm_dma_tag *dma_tag, char *buf, size_t buf_len)
+{
+	return NULL;
+}
+#endif /* CONFIG_I2C_MSM_PROF_DBG */
+#endif /* _I2C_MSM_V2_H */
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 5b38bca..6297d72 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -894,10 +894,13 @@
 		ret = wait_event_interruptible_timeout(dev->disconnect_wq,
 				!atomic_read(&dev->in_use),
 				msecs_to_jiffies(DEV_RELEASE_WAIT_TIMEOUT));
-		if (!ret)
+		if (!ret) {
 			uaudio_err("timeout while waiting for dev_release\n");
-		else if (ret < 0)
+			atomic_set(&dev->in_use, 0);
+		} else if (ret < 0) {
 			uaudio_err("failed with ret %d\n", ret);
+			atomic_set(&dev->in_use, 0);
+		}
 
 		mutex_lock(&chip->dev_lock);
 	}
@@ -1156,22 +1159,20 @@
 	mutex_unlock(&chip->dev_lock);
 
 response:
-	if (!req_msg->enable && ret != -EINVAL) {
-		if (ret != -ENODEV) {
-			if (info_idx >= 0) {
-				mutex_lock(&chip->dev_lock);
-				info = &uadev[pcm_card_num].info[info_idx];
-				uaudio_dev_intf_cleanup(
-						uadev[pcm_card_num].udev,
-						info);
-				uaudio_dbg("release resources: intf# %d card# %d\n",
-						subs->interface, pcm_card_num);
-				mutex_unlock(&chip->dev_lock);
-			}
+	if (!req_msg->enable && (ret != -EINVAL && ret != -ENODEV)) {
+		mutex_lock(&chip->dev_lock);
+		if (info_idx >= 0) {
+			info = &uadev[pcm_card_num].info[info_idx];
+			uaudio_dev_intf_cleanup(
+					uadev[pcm_card_num].udev,
+					info);
+			uaudio_dbg("release resources: intf# %d card# %d\n",
+					subs->interface, pcm_card_num);
 		}
 		if (atomic_read(&uadev[pcm_card_num].in_use))
 			kref_put(&uadev[pcm_card_num].kref,
 					uaudio_dev_release);
+		mutex_unlock(&chip->dev_lock);
 	}
 
 	resp.usb_token = req_msg->usb_token;