Merge "ARM: dts: msm: Add RTB and MSM MPM sleep counter nodes for kona"
diff --git a/Documentation/devicetree/bindings/arm/msm/spcom.txt b/Documentation/devicetree/bindings/arm/msm/spcom.txt
new file mode 100644
index 0000000..36a07ec
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/spcom.txt
@@ -0,0 +1,11 @@
+Qualcomm Technologies, Inc. Secure Processor Communication (spcom)
+
+Required properties:
+-compatible: should be "qcom,spcom"
+-qcom,spcom-ch-names: names of the predefined channels (string list)
+
+Example:
+    qcom,spcom {
+            compatible = "qcom,spcom";
+            qcom,spcom-ch-names = "sp_kernel", "sp_ssr";
+    };
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc.txt b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
index 50f8897..aea55d1 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc.txt
@@ -21,6 +21,7 @@
 			"qcom,gcc-mdm9615"
 			"qcom,gcc-sdm845"
 			"qcom,gcc-kona"
+			"qcom,gcc-lito"
 
 - reg : shall contain base register location and length
 - #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
new file mode 100644
index 0000000..db7d7d4
--- /dev/null
+++ b/Documentation/devicetree/bindings/regulator/rpmh-regulator.txt
@@ -0,0 +1,285 @@
+Qualcomm Technologies, Inc. RPMh Regulators
+
+rpmh-regulator devices support PMIC regulator management via the VRM, ARC and
+XOB RPMh accelerators.  The APPS processor communicates with these hardware
+blocks via an RSC using command packets.  The VRM allows changing four
+parameters for a given regulator: enable state, output voltage, operating mode,
+and minimum headroom voltage.  The ARC allows changing only a single parameter
+for a given regulator: its operating level.  This operating level is fed into
+CPR which then decides upon a final explicit voltage for the regulator.  The XOB
+allows changing only a single parameter for a given regulator: its enable state.
+
+=======================
+Required Node Structure
+=======================
+
+RPMh regulators must be described in two levels of device nodes.  The first
+level describes the interface with RPMh (resource) and must reside within an
+RPMh device node.  The second level describes properties of one regulator
+framework interface (of potentially many) for the regulator resource.
+
+==================================
+First Level Nodes - RPMh Interface
+==================================
+
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: Must be "qcom,rpmh-vrm-regulator", "qcom,rpmh-arc-regulator"
+		    or "qcom,rpmh-xob-regulator" depending upon the hardware
+		    type, VRM, ARC or XOB, of the RPMh managed regulator
+		    resource.
+
+- qcom,resource-name
+	Usage:      required
+	Value type: <string>
+	Definition: RPMh resource name which encodes the specific instance
+		    of a given type of regulator (LDO, SMPS, VS, etc) within
+		    a particular PMIC found in the system.  This name must match
+		    one defined by the bootloader.
+
+- qcom,regulator-type
+	Usage:      required if qcom,supported-modes is specified or if
+		    qcom,init-mode is specified in any subnodes
+	Value type: <string>
+	Definition: The physical type of the regulator including the PMIC
+		    family.  This is used for mode control.  Supported values:
+		    "pmic4-ldo", "pmic4-hfsmps", "pmic4-ftsmps", "pmic4-bob",
+		    "pmic5-ldo", "pmic5-hfsmps", "pmic5-ftsmps", and
+		    "pmic5-bob".
+
+- qcom,always-wait-for-ack
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the application processor
+		    must wait for an ACK or a NACK from RPMh for every request
+		    sent for this regulator including those which are for a
+		    strictly lower power state.
+
+- <regulator-name>-parent-supply
+	Usage:      optional
+	Value type: <phandle>
+	Definition: phandle of the parent supply regulator of one of the
+		    regulators for this RPMh resource.  The property name is
+		    defined by the value specified for the regulator-name
+		    property.
+
+- qcom,supported-modes
+	Usage:      optional; VRM regulators only
+	Value type: <prop-encoded-array>
+	Definition: A list of integers specifying the PMIC regulator modes
+		    supported by this regulator.  Supported values are
+		    RPMH_REGULATOR_MODE_* (i.e. 0 to 4).  Elements must be
+		    specified in order from lowest to highest.
+
+- qcom,mode-threshold-currents
+	Usage:      required if qcom,supported-modes is specified
+	Value type: <prop-encoded-array>
+	Definition: A list of integers specifying minimum allowed current in
+		    microamps for each of the modes listed in
+		    qcom,supported-modes.  The first element should always be 0.
+		    Elements must be specified in order from lowest to highest.
+
+- qcom,send-defaults
+	Usage:      optional
+	Value type: <empty>
+	Definition: Boolean flag which indicates that the initial parameter
+		    values should be sent to RPMh before consumers make their
+		    own requests.  If this flag is not specified, then initial
+		    parameter values will only be sent after some consumer
+		    makes a request.
+
+=========================================
+Second Level Nodes - Regulator Interfaces
+=========================================
+
+- regulator-name
+	Usage:      required
+	Value type: <string>
+	Definition: Specifies the name for this RPMh regulator.
+
+- regulator-min-microvolt
+	Usage:      required
+	Value type: <u32>
+	Definition: For VRM resources, this is the minimum supported voltage in
+		    microvolts.  For ARC resources, this is the minimum
+		    supported voltage level from RPMH_REGULATOR_LEVEL_*.
+
+- regulator-max-microvolt
+	Usage:      required
+	Value type: <u32>
+	Definition: For VRM resources, this is the maximum supported voltage in
+		    microvolts.  For ARC resources, this is the maximum
+		    supported voltage level from RPMH_REGULATOR_LEVEL_*.
+
+- regulator-enable-ramp-delay
+	Usage:      optional
+	Value type: <u32>
+	Definition: For VRM and XOB resources, the time in microseconds to delay
+		    after enabling a regulator.
+
+- qcom,set
+	Usage:      required
+	Value type: <u32>
+	Definition: Specifies the set(s) to which requests made via this
+		    regulator interface should be sent.  Requests sent in the
+		    active set take effect immediately.  Requests sent in the
+		    sleep set take effect when the Apps processor transitions
+		    into RPMh-assisted power collapse.  Supported values are
+		    RPMH_REGULATOR_SET_* (i.e. 1, 2, or 3).
+
+- qcom,init-enable
+	Usage:      optional; VRM and XOB regulators only
+	Value type: <u32>
+	Definition: Specifies the initial enable state to request for a VRM
+		    or XOB regulator.  Supported values are 0 (regulator
+		    disabled) and 1 (regulator enabled).
+
+- qcom,init-voltage
+	Usage:      optional; VRM regulators only
+	Value type: <u32>
+	Definition: Specifies the initial voltage in microvolts to request for a
+		    VRM regulator.  Supported values are 0 to 8191000.
+
+- qcom,init-mode
+	Usage:      optional; VRM regulators only
+	Value type: <u32>
+	Definition: Specifies the initial mode to request for a VRM regulator.
+		    Supported values are RPMH_REGULATOR_MODE_* (i.e. 0 to 4).
+
+- qcom,init-headroom-voltage
+	Usage:      optional; VRM regulators only
+	Value type: <u32>
+	Definition: Specifies the initial headroom voltage in microvolts to
+		    request for a VRM regulator.  RPMh ensures that the parent
+		    of this regulator outputs a voltage high enough to satisfy
+		    the requested headroom.  Supported values are 0 to 511000.
+
+- qcom,init-voltage-level
+	Usage:      optional; ARC regulators only
+	Value type: <u32>
+	Definition: Specifies the initial voltage level to request for an ARC
+		    regulator.  Supported values are RPMH_REGULATOR_LEVEL_*
+		    (i.e. 1 to ~513).
+
+- qcom,min-dropout-voltage
+	Usage:      optional; VRM regulators only
+	Value type: <u32>
+	Definition: Specifies the minimum voltage in microvolts that the parent
+		    supply regulator must output above the output of this
+		    regulator.  It is only meaningful if the property
+		    <regulator-name>-parent-supply has been specified in the
+		    first level node.
+
+- qcom,min-dropout-voltage-level
+	Usage:      optional; ARC regulators only
+	Value type: <u32>
+	Definition: Specifies the minimum voltage level difference that the
+		    parent supply regulator must output above the output of this
+		    regulator.  It is only meaningful if the property
+		    <regulator-name>-parent-supply has been specified in the
+		    first level node.
+
+========
+Examples
+========
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
+&apps_rsc {
+	rpmh-regulator-cxlvl {
+		compatible = "qcom,rpmh-arc-regulator";
+		qcom,resource-name = "cx.lvl";
+		qcom,send-defaults;
+		pm8998_s9_level: regulator-s9-level {
+			regulator-name = "pm8998_s9_level";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt =
+					<RPMH_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+			qcom,init-voltage-level = <RPMH_REGULATOR_LEVEL_TURBO>;
+		};
+
+		pm8998_s9_level_ao: regulator-s9-level-ao {
+			regulator-name = "pm8998_s9_level_ao";
+			qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+			regulator-min-microvolt =
+					<RPMH_REGULATOR_LEVEL_RETENTION>;
+			regulator-max-microvolt = <RPMH_REGULATOR_LEVEL_MAX>;
+		};
+	};
+
+	rpmh-regulator-smpa2 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		qcom,resource-name = "smpa2";
+		qcom,regulator-type = "pmic4-smps";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_AUTO
+			 RPMH_REGULATOR_MODE_HPM>;
+		qcom,mode-threshold-currents = <0 2000000>;
+		pm8998_s2: regulator-s2 {
+			regulator-name = "pm8998_s2";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1100000>;
+			regulator-max-microvolt = <1200000>;
+			regulator-enable-ramp-delay = <200>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_AUTO>;
+			qcom,init-voltage = <1150000>;
+		};
+	};
+
+	rpmh-regulator-ldoa4 {
+		compatible = "qcom,rpmh-vrm-regulator";
+		qcom,resource-name = "ldoa4";
+		qcom,regulator-type = "pmic4-ldo";
+		pm8998_l4-parent-supply = <&pm8998_s2>;
+		pm8998_l4: regulator-l4 {
+			regulator-name = "pm8998_l4";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1000000>;
+			qcom,init-voltage = <1000000>;
+		};
+	};
+
+	rpmh-regulator-ldoc1 {
+		compatible = "qcom,rpmh-xob-regulator";
+		qcom,resource-name = "ldoc1";
+		pm8150l_l1: regulator-pm8150l-l1 {
+			regulator-name = "pm8150l_l1";
+			qcom,set = <RPMH_REGULATOR_SET_ALL>;
+			regulator-min-microvolt = <1800000>;
+			regulator-max-microvolt = <1800000>;
+		};
+	};
+};
+
+&disp_rsc {
+	rpmh-regulator-ldoa3-disp {
+		compatible = "qcom,rpmh-vrm-regulator";
+		qcom,resource-name = "ldoa3";
+		qcom,regulator-type = "pmic4-ldo";
+		qcom,supported-modes =
+			<RPMH_REGULATOR_MODE_LPM
+			 RPMH_REGULATOR_MODE_HPM>;
+		qcom,mode-threshold-currents = <0 10000>;
+		qcom,always-wait-for-ack;
+		pm8998_l3_disp_ao: regulator-l3-ao {
+			regulator-name = "pm8998_l3_disp_ao";
+			qcom,set = <RPMH_REGULATOR_SET_ACTIVE>;
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,init-voltage = <1000000>;
+			qcom,init-headroom-voltage = <60000>;
+		};
+		pm8998_l3_disp_so: regulator-l3-so {
+			regulator-name = "pm8998_l3_disp_so";
+			qcom,set = <RPMH_REGULATOR_SET_SLEEP>;
+			regulator-min-microvolt = <1000000>;
+			regulator-max-microvolt = <1200000>;
+			qcom,init-mode = <RPMH_REGULATOR_MODE_LPM>;
+			qcom,init-voltage = <1000000>;
+			qcom,init-enable = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
index 37a14f5..5f33420 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
@@ -1,8 +1,28 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+&soc {
+	led_flash_rear: qcom,camera-flash0 {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm8150l_flash0 &pm8150l_flash1>;
+		torch-source = <&pm8150l_torch0 &pm8150l_torch1>;
+		switch-source = <&pm8150l_switch2>;
+		status = "ok";
+	};
+
+	led_flash_rear_aux: qcom,camera-flash1 {
+		cell-index = <1>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm8150l_flash0 &pm8150l_flash1>;
+		torch-source = <&pm8150l_torch0 &pm8150l_torch1>;
+		switch-source = <&pm8150l_switch2>;
+		status = "ok";
+	};
+};
+
 &cam_cci0 {
 	qcom,cam-res-mgr {
 		compatible = "qcom,cam-res-mgr";
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
index 37a14f5..5f33420 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
@@ -1,8 +1,28 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
+&soc {
+	led_flash_rear: qcom,camera-flash0 {
+		cell-index = <0>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm8150l_flash0 &pm8150l_flash1>;
+		torch-source = <&pm8150l_torch0 &pm8150l_torch1>;
+		switch-source = <&pm8150l_switch2>;
+		status = "ok";
+	};
+
+	led_flash_rear_aux: qcom,camera-flash1 {
+		cell-index = <1>;
+		compatible = "qcom,camera-flash";
+		flash-source = <&pm8150l_flash0 &pm8150l_flash1>;
+		torch-source = <&pm8150l_torch0 &pm8150l_torch1>;
+		switch-source = <&pm8150l_switch2>;
+		status = "ok";
+	};
+};
+
 &cam_cci0 {
 	qcom,cam-res-mgr {
 		compatible = "qcom,cam-res-mgr";
diff --git a/drivers/bus/mhi/controllers/mhi_qcom.c b/drivers/bus/mhi/controllers/mhi_qcom.c
index eb3a9c5..c9bc1c9 100644
--- a/drivers/bus/mhi/controllers/mhi_qcom.c
+++ b/drivers/bus/mhi/controllers/mhi_qcom.c
@@ -607,7 +607,8 @@
 	mhi_cntrl->fw_image = firmware_info->fw_image;
 	mhi_cntrl->edl_image = firmware_info->edl_image;
 
-	sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, &mhi_qcom_group);
+	if (sysfs_create_group(&mhi_cntrl->mhi_dev->dev.kobj, &mhi_qcom_group))
+		MHI_ERR("Error while creating the sysfs group\n");
 
 	return mhi_cntrl;
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index b3c384a..2d59e72 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -31,6 +31,8 @@
 #include <linux/suspend.h>
 #include <linux/syscore_ops.h>
 #include <linux/tick.h>
+#include <linux/sched/topology.h>
+
 #include <trace/events/power.h>
 
 static LIST_HEAD(cpufreq_policy_list);
@@ -1075,7 +1077,8 @@
 	if (has_target()) {
 		ret = cpufreq_start_governor(policy);
 		if (ret)
-			pr_err("%s: Failed to start governor\n", __func__);
+			pr_err("%s: Failed to start governor for CPU%u, policy CPU%u\n",
+			       __func__, cpu, policy->cpu);
 	}
 	up_write(&policy->rwsem);
 	return ret;
@@ -1976,15 +1979,6 @@
 	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
 		 policy->cpu, target_freq, relation, old_target_freq);
 
-	/*
-	 * This might look like a redundant call as we are checking it again
-	 * after finding index. But it is left intentionally for cases where
-	 * exactly same freq is called again and so we can save on few function
-	 * calls.
-	 */
-	if (target_freq == policy->cur)
-		return 0;
-
 	/* Save last value to restore later on errors */
 	policy->restore_freq = policy->cur;
 
@@ -2543,7 +2537,7 @@
 	hp_online = ret;
 	ret = 0;
 
-	pr_debug("driver %s up and running\n", driver_data->name);
+	pr_info("driver %s up and running\n", driver_data->name);
 	goto out;
 
 err_if_unreg:
@@ -2575,7 +2569,7 @@
 	if (!cpufreq_driver || (driver != cpufreq_driver))
 		return -EINVAL;
 
-	pr_debug("unregistering driver %s\n", driver->name);
+	pr_info("unregistering driver %s\n", driver->name);
 
 	/* Protect against concurrent cpu hotplug */
 	cpus_read_lock();
diff --git a/drivers/cpufreq/qcom-cpufreq.c b/drivers/cpufreq/qcom-cpufreq.c
index 412ab64..45d02f0 100644
--- a/drivers/cpufreq/qcom-cpufreq.c
+++ b/drivers/cpufreq/qcom-cpufreq.c
@@ -4,7 +4,7 @@
  * MSM architecture cpufreq driver
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2017,2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2019, The Linux Foundation. All rights reserved.
  * Author: Mike A. Chan <mikechan@google.com>
  *
  */
@@ -19,10 +19,13 @@
 #include <linux/err.h>
 #include <linux/platform_device.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/cpu_cooling.h>
 #include <trace/events/power.h>
 
 static DEFINE_MUTEX(l2bw_lock);
 
+static struct thermal_cooling_device *cdev[NR_CPUS];
 static struct clk *cpu_clk[NR_CPUS];
 static struct clk *l2_clk;
 static DEFINE_PER_CPU(struct cpufreq_frequency_table *, freq_table);
@@ -34,6 +37,8 @@
 };
 
 static DEFINE_PER_CPU(struct cpufreq_suspend_t, suspend_data);
+static DEFINE_PER_CPU(int, cached_resolve_idx);
+static DEFINE_PER_CPU(unsigned int, cached_resolve_freq);
 
 static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq,
 			unsigned int index)
@@ -53,8 +58,11 @@
 	rate = clk_round_rate(cpu_clk[policy->cpu], rate);
 	ret = clk_set_rate(cpu_clk[policy->cpu], rate);
 	cpufreq_freq_transition_end(policy, &freqs, ret);
-	if (!ret)
+	if (!ret) {
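+		/* Update the scheduler's frequency-invariance scale factor */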
+		arch_set_freq_scale(policy->related_cpus, new_freq,
+				    policy->cpuinfo.max_freq);
 		trace_cpu_frequency_switch_end(policy->cpu);
+	}
 
 	return ret;
 }
@@ -63,12 +71,16 @@
 				unsigned int target_freq,
 				unsigned int relation)
 {
-	int ret = -EFAULT;
+	int ret = 0;
 	int index;
 	struct cpufreq_frequency_table *table;
+	int first_cpu = cpumask_first(policy->related_cpus);
 
 	mutex_lock(&per_cpu(suspend_data, policy->cpu).suspend_mutex);
 
+	if (target_freq == policy->cur)
+		goto done;
+
 	if (per_cpu(suspend_data, policy->cpu).device_suspended) {
 		pr_debug("cpufreq: cpu%d scheduling frequency change in suspend\n",
 			 policy->cpu);
@@ -77,7 +89,11 @@
 	}
 
 	table = policy->freq_table;
-	index = cpufreq_frequency_table_target(policy, target_freq, relation);
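+	/* Reuse the index cached by msm_cpufreq_resolve_freq() for this freq */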
+	if (per_cpu(cached_resolve_freq, first_cpu) == target_freq)
+		index = per_cpu(cached_resolve_idx, first_cpu);
+	else
+		index = cpufreq_frequency_table_target(policy, target_freq,
+						       relation);
 
 	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
 		policy->cpu, target_freq, relation,
@@ -90,6 +106,23 @@
 	return ret;
 }
 
+static unsigned int msm_cpufreq_resolve_freq(struct cpufreq_policy *policy,
+					     unsigned int target_freq)
+{
+	int index;
+	int first_cpu = cpumask_first(policy->related_cpus);
+	unsigned int freq;
+
+	index = cpufreq_frequency_table_target(policy, target_freq,
+					       CPUFREQ_RELATION_L);
+	freq = policy->freq_table[index].frequency;
+
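+	/* Cache the result so msm_cpufreq_target() can skip the table lookup */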
+	per_cpu(cached_resolve_idx, first_cpu) = index;
+	per_cpu(cached_resolve_freq, first_cpu) = freq;
+
+	return freq;
+}
+
 static int msm_cpufreq_verify(struct cpufreq_policy *policy)
 {
 	cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
@@ -143,6 +176,7 @@
 	pr_debug("cpufreq: cpu%d init at %d switching to %d\n",
 			policy->cpu, cur_freq, table[index].frequency);
 	policy->cur = table[index].frequency;
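+	/* Allow frequency requests to be initiated from any online CPU */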
+	policy->dvfs_possible_from_any_cpu = true;
 
 	return 0;
 }
@@ -268,6 +302,41 @@
 	NULL,
 };
 
+static void msm_cpufreq_ready(struct cpufreq_policy *policy)
+{
+	struct device_node *np, *lmh_node;
+	unsigned int cpu = policy->cpu;
+
+	if (cdev[cpu])
+		return;
+
+	np = of_cpu_device_node_get(cpu);
+	if (WARN_ON(!np))
+		return;
+
+	/*
+	 * For now, just loading the cooling device;
+	 * thermal DT code takes care of matching them.
+	 */
+	if (of_find_property(np, "#cooling-cells", NULL)) {
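+		/* Skip cooling device registration when qcom,lmh-dcvs exists */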
+		lmh_node = of_parse_phandle(np, "qcom,lmh-dcvs", 0);
+		if (lmh_node) {
+			of_node_put(lmh_node);
+			goto ready_exit;
+		}
+
+		cdev[cpu] = of_cpufreq_cooling_register(policy);
+		if (IS_ERR(cdev[cpu])) {
+			pr_err("running cpufreq for CPU%d without cooling dev: %ld\n",
+			       cpu, PTR_ERR(cdev[cpu]));
+			cdev[cpu] = NULL;
+		}
+	}
+
+ready_exit:
+	of_node_put(np);
+}
+
 static struct cpufreq_driver msm_cpufreq_driver = {
 	/* lps calculations are handled here. */
 	.flags		= CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS |
@@ -275,9 +344,11 @@
 	.init		= msm_cpufreq_init,
 	.verify		= msm_cpufreq_verify,
 	.target		= msm_cpufreq_target,
+	.resolve_freq	= msm_cpufreq_resolve_freq,
 	.get		= msm_cpufreq_get_freq,
 	.name		= "msm",
 	.attr		= msm_freq_attr,
+	.ready		= msm_cpufreq_ready,
 };
 
 static struct cpufreq_frequency_table *cpufreq_parse_dt(struct device *dev,
@@ -370,7 +441,7 @@
 	if (!IS_ERR(ftbl)) {
 		for_each_possible_cpu(cpu)
 			per_cpu(freq_table, cpu) = ftbl;
-		return 0;
+		goto out_register;
 	}
 
 	/*
@@ -410,6 +481,7 @@
 		per_cpu(freq_table, cpu) = ftbl;
 	}
 
+out_register:
 	ret = register_pm_notifier(&msm_cpufreq_pm_notifier);
 	if (ret)
 		return ret;
@@ -441,6 +513,7 @@
 	for_each_possible_cpu(cpu) {
 		mutex_init(&(per_cpu(suspend_data, cpu).suspend_mutex));
 		per_cpu(suspend_data, cpu).device_suspended = 0;
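+		/* UINT_MAX means no resolve result has been cached yet */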
+		per_cpu(cached_resolve_freq, cpu) = UINT_MAX;
 	}
 
 	rc = platform_driver_register(&msm_cpufreq_plat_driver);
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
index 21fc9c6..f48fea1 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/delay.h>
@@ -18,6 +18,7 @@
 #include "cpastop_v175_100.h"
 #include "cpastop_v175_101.h"
 #include "cpastop_v175_120.h"
+#include "cpastop_v480_100.h"
 
 struct cam_camnoc_info *camnoc_info;
 
@@ -110,6 +111,10 @@
 			(hw_caps->cpas_version.minor == 0) &&
 			(hw_caps->cpas_version.incr == 0))
 			soc_info->hw_version = CAM_CPAS_TITAN_150_V100;
+	} else if ((hw_caps->camera_version.major == 4) &&
+		(hw_caps->camera_version.minor == 8) &&
+		(hw_caps->camera_version.incr == 0)) {
+		soc_info->hw_version = CAM_CPAS_TITAN_480_V100;
 	}
 
 	CAM_DBG(CAM_CPAS, "CPAS HW VERSION %x", soc_info->hw_version);
@@ -589,6 +594,9 @@
 	case CAM_CPAS_TITAN_150_V100:
 		camnoc_info = &cam150_cpas100_camnoc_info;
 		break;
+	case CAM_CPAS_TITAN_480_V100:
+		camnoc_info = &cam480_cpas100_camnoc_info;
+		break;
 	default:
 		CAM_ERR(CAM_CPAS, "Camera Version not supported %d.%d.%d",
 			hw_caps->camera_version.major,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
index 1e1c9fa..8a8c61a 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cam_cpastop_hw.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_CPASTOP_HW_H_
@@ -70,6 +70,10 @@
  * @CAM_CAMNOC_CDM: Indicates CDM HW connection to camnoc
  * @CAM_CAMNOC_IFE02: Indicates IFE0, IFE2 HW connection to camnoc
  * @CAM_CAMNOC_IFE13: Indicates IFE1, IFE3 HW connection to camnoc
+ * @CAM_CAMNOC_IFE_LINEAR: Indicates linear data from all IFEs to camnoc
+ * @CAM_CAMNOC_IFE_UBWC_STATS: Indicates ubwc+stats from all IFEs to camnoc
+ * @CAM_CAMNOC_IFE_RDI_WR: Indicates RDI write data from all IFEs to camnoc
+ * @CAM_CAMNOC_IFE_RDI_RD: Indicates RDI read data from all IFEs to camnoc
  * @CAM_CAMNOC_IFE0123_RDI_WRITE: RDI write only for all IFEx
  * @CAM_CAMNOC_IFE0_NRDI_WRITE: IFE0 non-RDI write
  * @CAM_CAMNOC_IFE01_RDI_READ: IFE0/1 RDI READ
@@ -80,6 +84,10 @@
  *         connection to camnoc
  * @CAM_CAMNOC_IPE_VID_DISP_WRITE: Indicates IPE's VID/DISP Wrire HW
  *         connection to camnoc
+ * @CAM_CAMNOC_IPE0_RD: Indicates IPE's Read0 HW connection to camnoc
+ * @CAM_CAMNOC_IPE1_BPS_RD: Indicates IPE's Read1 + BPS Read HW connection
+ *         to camnoc
+ * @CAM_CAMNOC_IPE_BPS_WR: Indicates IPE+BPS Write HW connection to camnoc
  * @CAM_CAMNOC_JPEG: Indicates JPEG HW connection to camnoc
  * @CAM_CAMNOC_FD: Indicates FD HW connection to camnoc
  * @CAM_CAMNOC_ICP: Indicates ICP HW connection to camnoc
@@ -88,6 +96,10 @@
 	CAM_CAMNOC_CDM,
 	CAM_CAMNOC_IFE02,
 	CAM_CAMNOC_IFE13,
+	CAM_CAMNOC_IFE_LINEAR,
+	CAM_CAMNOC_IFE_UBWC_STATS,
+	CAM_CAMNOC_IFE_RDI_WR,
+	CAM_CAMNOC_IFE_RDI_RD,
 	CAM_CAMNOC_IFE0123_RDI_WRITE,
 	CAM_CAMNOC_IFE0_NRDI_WRITE,
 	CAM_CAMNOC_IFE01_RDI_READ,
@@ -95,6 +107,9 @@
 	CAM_CAMNOC_IPE_BPS_LRME_READ,
 	CAM_CAMNOC_IPE_BPS_LRME_WRITE,
 	CAM_CAMNOC_IPE_VID_DISP_WRITE,
+	CAM_CAMNOC_IPE0_RD,
+	CAM_CAMNOC_IPE1_BPS_RD,
+	CAM_CAMNOC_IPE_BPS_WR,
 	CAM_CAMNOC_JPEG,
 	CAM_CAMNOC_FD,
 	CAM_CAMNOC_ICP,
diff --git a/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v480_100.h b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v480_100.h
new file mode 100644
index 0000000..d4782d7
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_cpas/cpas_top/cpastop_v480_100.h
@@ -0,0 +1,743 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _CPASTOP_V480_100_H_
+#define _CPASTOP_V480_100_H_
+
+#define TEST_IRQ_ENABLE 0
+
+static struct cam_camnoc_irq_sbm cam_cpas_v480_100_irq_sbm = {
+	.sbm_enable = {
+		.access_type = CAM_REG_TYPE_READ_WRITE,
+		.enable = false,
+		.offset = 0x2040, /* SBM_FAULTINEN0_LOW */
+		.value = 0x1 | /* SBM_FAULTINEN0_LOW_PORT0_MASK */
+			0x2 | /* SBM_FAULTINEN0_LOW_PORT1_MASK */
+			0x4 | /* SBM_FAULTINEN0_LOW_PORT2_MASK */
+			0x8 | /* SBM_FAULTINEN0_LOW_PORT3_MASK */
+			0x10 | /* SBM_FAULTINEN0_LOW_PORT4_MASK */
+			0x20 | /* SBM_FAULTINEN0_LOW_PORT5_MASK */
+			(TEST_IRQ_ENABLE ?
+			0x100 : /* SBM_FAULTINEN0_LOW_PORT8_MASK */
+			0x0),
+	},
+	.sbm_status = {
+		.access_type = CAM_REG_TYPE_READ,
+		.enable = true,
+		.offset = 0x2048, /* SBM_FAULTINSTATUS0_LOW */
+	},
+	.sbm_clear = {
+		.access_type = CAM_REG_TYPE_WRITE,
+		.enable = true,
+		.offset = 0x2080, /* SBM_FLAGOUTCLR0_LOW */
+		.value = TEST_IRQ_ENABLE ? 0x6 : 0x2,
+	}
+};
+
+static struct cam_camnoc_irq_err
+	cam_cpas_v480_100_irq_err[] = {
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_SLAVE_ERROR,
+		.enable = true,
+		.sbm_port = 0x1, /* SBM_FAULTINSTATUS0_LOW_PORT0_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2708, /* ERRLOGGER_MAINCTL_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2710, /* ERRLOGGER_ERRVLD_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x2718, /* ERRLOGGER_ERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE02_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x2, /* SBM_FAULTINSTATUS0_LOW_PORT1_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x5a0, /* IFE02_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x590, /* IFE02_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x598, /* IFE02_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IFE13_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x4, /* SBM_FAULTINSTATUS0_LOW_PORT2_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x9a0, /* IFE13_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x990, /* IFE13_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x998, /* IFE13_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_DECODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x8, /* SBM_FAULTINSTATUS0_LOW_PORT3_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0xd20, /* IBL_RD_DECERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0xd10, /* IBL_RD_DECERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0xd18, /* IBL_RD_DECERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_IPE_BPS_UBWC_ENCODE_ERROR,
+		.enable = true,
+		.sbm_port = 0x10, /* SBM_FAULTINSTATUS0_LOW_PORT4_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x11a0, /* IBL_WR_ENCERREN_LOW */
+			.value = 1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x1190, /* IBL_WR_ENCERRSTATUS_LOW */
+		},
+		.err_clear = {
+			.access_type = CAM_REG_TYPE_WRITE,
+			.enable = true,
+			.offset = 0x1198, /* IBL_WR_ENCERRCLR_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_AHB_TIMEOUT,
+		.enable = true,
+		.sbm_port = 0x20, /* SBM_FAULTINSTATUS0_LOW_PORT5_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x1,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED1,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_RESERVED2,
+		.enable = false,
+	},
+	{
+		.irq_type = CAM_CAMNOC_HW_IRQ_CAMNOC_TEST,
+		.enable = TEST_IRQ_ENABLE ? true : false,
+		.sbm_port = 0x100, /* SBM_FAULTINSTATUS0_LOW_PORT8_MASK */
+		.err_enable = {
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.enable = true,
+			.offset = 0x2088, /* SBM_FLAGOUTSET0_LOW */
+			.value = 0x5,
+		},
+		.err_status = {
+			.access_type = CAM_REG_TYPE_READ,
+			.enable = true,
+			.offset = 0x2090, /* SBM_FLAGOUTSTATUS0_LOW */
+		},
+		.err_clear = {
+			.enable = false,
+		},
+	},
+};
+
+static struct cam_camnoc_specific
+	cam_cpas_v480_100_camnoc_specific[] = {
+	{
+		.port_type = CAM_CAMNOC_CDM,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x30, /* CDM_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x34, /* CDM_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x38, /* CDM_URGENCY_LOW */
+			.mask = 0x7, /* CDM_URGENCY_LOW_READ_MASK */
+			.shift = 0x0, /* CDM_URGENCY_LOW_READ_SHIFT */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x40, /* CDM_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x48, /* CDM_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_FD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x630, /* FD_PRIORITYLUT_LOW */
+			.value = 0x44444444,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x634, /* FD_PRIORITYLUT_HIGH */
+			.value = 0x44444444,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x638, /* FD_URGENCY_LOW */
+			.value = 0x2,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x640, /* FD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x648, /* FD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE_LINEAR,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xA30, /* IFE_LINEAR_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0xA34, /* IFE_LINEAR_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0xA38, /* IFE_LINEAR_URGENCY_LOW */
+			/* IFE_LINEAR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IFE_LINEAR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0xA40, /* IFE_LINEAR_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0xA48, /* IFE_LINEAR_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power-on default values take care of the required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE_RDI_RD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1030, /* IFE_RDI_RD_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1034, /* IFE_RDI_RD_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1038, /* IFE_RDI_RD_URGENCY_LOW */
+			/* IFE_RDI_RD_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IFE_RDI_RD_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1040, /* IFE_RDI_RD_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1048, /* IFE_RDI_RD_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power-on default values take care of the required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE_RDI_WR,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1430, /* IFE_RDI_WR_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1434, /* IFE_RDI_WR_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1438, /* IFE_RDI_WR_URGENCY_LOW */
+			/* IFE_RDI_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IFE_RDI_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1440, /* IFE_RDI_WR_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1448, /* IFE_RDI_WR_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power-on default values take care of the required
+			 * register settings.
+			 */
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IFE_UBWC_STATS,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1A30, /* IFE_UBWC_STATS_PRIORITYLUT_LOW */
+			.value = 0x66665433,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1A34, /* IFE_UBWC_STATS_PRIORITYLUT_HIGH */
+			.value = 0x66666666,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1A38, /* IFE_UBWC_STATS_URGENCY_LOW */
+			/* IFE_UBWC_STATS_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IFE_UBWC_STATS_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1A40, /* IFE_UBWC_STATS_DANGERLUT_LOW */
+			.value = 0xFFFFFF00,
+		},
+		.safe_lut = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.offset = 0x1A48, /* IFE_UBWC_STATS_SAFELUT_LOW */
+			.value = 0x1,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power-on default values take care of the required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1B88, /* IFE_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE0_RD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1E30, /* IPE0_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1E34, /* IPE0_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x1E38, /* IPE0_RD_URGENCY_LOW */
+			/* IPE0_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* IPE0_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1E40, /* IPE0_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1E48, /* IPE0_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power-on default values take care of the required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x1F08, /* IPE0_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE1_BPS_RD,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2430, /* IPE1_BPS_RD_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2434, /* IPE1_BPS_RD_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2438, /* IPE1_BPS_RD_URGENCY_LOW */
+			/* IPE1_BPS_RD_URGENCY_LOW_READ_MASK */
+			.mask = 0x7,
+			/* IPE1_BPS_RD_URGENCY_LOW_READ_SHIFT */
+			.shift = 0x0,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2440, /* IPE1_BPS_RD_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2448, /* IPE1_BPS_RD_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power-on default values take care of the required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2508, /* IPE1_BPS_RD_DECCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_IPE_BPS_WR,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2830, /* IPE_BPS_WR_PRIORITYLUT_LOW */
+			.value = 0x33333333,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2834, /* IPE_BPS_WR_PRIORITYLUT_HIGH */
+			.value = 0x33333333,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 1,
+			.offset = 0x2838, /* IPE_BPS_WR_URGENCY_LOW */
+			/* IPE_BPS_WR_URGENCY_LOW_WRITE_MASK */
+			.mask = 0x70,
+			/* IPE_BPS_WR_URGENCY_LOW_WRITE_SHIFT */
+			.shift = 0x4,
+			.value = 3,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2840, /* IPE_BPS_WR_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2848, /* IPE_BPS_WR_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			/*
+			 * Do not explicitly set ubwc config register.
+			 * Power-on default values take care of the required
+			 * register settings.
+			 */
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2988, /* IPE_BPS_WR_ENCCTL_LOW */
+			.value = 1,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_JPEG,
+		.enable = true,
+		.priority_lut_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E30, /* JPEG_PRIORITYLUT_LOW */
+			.value = 0x22222222,
+		},
+		.priority_lut_high = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E34, /* JPEG_PRIORITYLUT_HIGH */
+			.value = 0x22222222,
+		},
+		.urgency = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E38, /* JPEG_URGENCY_LOW */
+			.value = 0x22,
+		},
+		.danger_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E40, /* JPEG_DANGERLUT_LOW */
+			.value = 0x0,
+		},
+		.safe_lut = {
+			.enable = false,
+			.access_type = CAM_REG_TYPE_READ_WRITE,
+			.masked_value = 0,
+			.offset = 0x2E48, /* JPEG_SAFELUT_LOW */
+			.value = 0x0,
+		},
+		.ubwc_ctl = {
+			.enable = false,
+		},
+	},
+	{
+		.port_type = CAM_CAMNOC_ICP,
+		.enable = true,
+		.flag_out_set0_low = {
+			.enable = true,
+			.access_type = CAM_REG_TYPE_WRITE,
+			.masked_value = 0,
+			.offset = 0x2088,
+			.value = 0x100000,
+		},
+	},
+};
+
+static struct cam_camnoc_err_logger_info cam480_cpas100_err_logger_offsets = {
+	.mainctrl     =  0x7008, /* ERRLOGGER_MAINCTL_LOW */
+	.errvld       =  0x7010, /* ERRLOGGER_ERRVLD_LOW */
+	.errlog0_low  =  0x7020, /* ERRLOGGER_ERRLOG0_LOW */
+	.errlog0_high =  0x7024, /* ERRLOGGER_ERRLOG0_HIGH */
+	.errlog1_low  =  0x7028, /* ERRLOGGER_ERRLOG1_LOW */
+	.errlog1_high =  0x702c, /* ERRLOGGER_ERRLOG1_HIGH */
+	.errlog2_low  =  0x7030, /* ERRLOGGER_ERRLOG2_LOW */
+	.errlog2_high =  0x7034, /* ERRLOGGER_ERRLOG2_HIGH */
+	.errlog3_low  =  0x7038, /* ERRLOGGER_ERRLOG3_LOW */
+	.errlog3_high =  0x703c, /* ERRLOGGER_ERRLOG3_HIGH */
+};
+
+static struct cam_cpas_hw_errata_wa_list cam480_cpas100_errata_wa_list = {
+	.camnoc_flush_slave_pending_trans = {
+		.enable = false,
+		.data.reg_info = {
+			.access_type = CAM_REG_TYPE_READ,
+			.offset = 0x2100, /* SidebandManager_SenseIn0_Low */
+			.mask = 0xE0000, /* Bits 17, 18, 19 */
+			.value = 0, /* expected to be 0 */
+		},
+	},
+};
+
+static struct cam_camnoc_info cam480_cpas100_camnoc_info = {
+	.specific = &cam_cpas_v480_100_camnoc_specific[0],
+	.specific_size = ARRAY_SIZE(cam_cpas_v480_100_camnoc_specific),
+	.irq_sbm = &cam_cpas_v480_100_irq_sbm,
+	.irq_err = &cam_cpas_v480_100_irq_err[0],
+	.irq_err_size = ARRAY_SIZE(cam_cpas_v480_100_irq_err),
+	.err_logger = &cam480_cpas100_err_logger_offsets,
+	.errata_wa_list = &cam480_cpas100_errata_wa_list,
+};
+
+#endif /* _CPASTOP_V480_100_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
index 95930be..f0dcc5f 100644
--- a/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
+++ b/drivers/media/platform/msm/camera/cam_cpas/include/cam_cpas_api.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_CPAS_API_H_
@@ -41,6 +41,7 @@
 	CAM_CPAS_TITAN_175_V100 = 0x175100,
 	CAM_CPAS_TITAN_175_V101 = 0x175101,
 	CAM_CPAS_TITAN_175_V120 = 0x175120,
+	CAM_CPAS_TITAN_480_V100 = 0x480100,
 	CAM_CPAS_TITAN_MAX
 };
 
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index 6ba2ad9..1539fe5 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/slab.h>
@@ -5147,14 +5147,6 @@
 		evt_payload, evt_payload->core_index);
 	CAM_DBG(CAM_ISP, "bus_irq_status_0: = %x", evt_payload->irq_reg_val[0]);
 	CAM_DBG(CAM_ISP, "bus_irq_status_1: = %x", evt_payload->irq_reg_val[1]);
-	CAM_DBG(CAM_ISP, "bus_irq_status_2: = %x", evt_payload->irq_reg_val[2]);
-	CAM_DBG(CAM_ISP, "bus_irq_comp_err: = %x", evt_payload->irq_reg_val[3]);
-	CAM_DBG(CAM_ISP, "bus_irq_comp_owrt: = %x",
-		evt_payload->irq_reg_val[4]);
-	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_err: = %x",
-		evt_payload->irq_reg_val[5]);
-	CAM_DBG(CAM_ISP, "bus_irq_dual_comp_owrt: = %x",
-		evt_payload->irq_reg_val[6]);
 	/* WM Done */
 	return cam_ife_hw_mgr_handle_buf_done_for_hw_res(ife_hwr_mgr_ctx,
 		evt_payload_priv);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index c5a1e02..61073c3 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_HW_INTF_H_
@@ -67,6 +67,12 @@
 	CAM_IFE_BUS_IRQ_REGISTERS_MAX,
 };
 
+enum cam_vfe_bus_ver3_irq_regs {
+	CAM_IFE_IRQ_BUS_VER3_REG_STATUS0             = 0,
+	CAM_IFE_IRQ_BUS_VER3_REG_STATUS1             = 1,
+	CAM_IFE_IRQ_BUS_VER3_REG_MAX,
+};
+
 enum cam_vfe_reset_type {
 	CAM_VFE_HW_RESET_HW_AND_REG,
 	CAM_VFE_HW_RESET_HW,
@@ -257,8 +263,11 @@
 	struct list_head            list;
 	uint32_t                    core_index;
 	uint32_t                    debug_status_0;
+	uint32_t                    ccif_violation_status;
+	uint32_t                    overflow_status;
+	uint32_t                    image_size_violation_status;
 	uint32_t                    evt_id;
-	uint32_t                    irq_reg_val[CAM_IFE_BUS_IRQ_REGISTERS_MAX];
+	uint32_t                    irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_MAX];
 	uint32_t                    error_type;
 	struct cam_isp_timestamp    ts;
 	void                       *ctx;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c
index 9c2daaa..7660530 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe17x.c
@@ -1,12 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/module.h>
 #include "cam_vfe170.h"
 #include "cam_vfe175.h"
 #include "cam_vfe175_130.h"
+#include "cam_vfe480.h"
 #include "cam_vfe_lite17x.h"
 #include "cam_vfe_hw_intf.h"
 #include "cam_vfe_core.h"
@@ -26,6 +27,10 @@
 		.data = &cam_vfe175_130_hw_info,
 	},
 	{
+		.compatible = "qcom,vfe480",
+		.data = &cam_vfe480_hw_info,
+	},
+	{
 		.compatible = "qcom,vfe-lite170",
 		.data = &cam_vfe_lite17x_hw_info,
 	},
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
new file mode 100644
index 0000000..d883001
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe17x/cam_vfe480.h
@@ -0,0 +1,974 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _CAM_VFE480_H_
+#define _CAM_VFE480_H_
+
+#include "cam_vfe_bus_ver3.h"
+#include "cam_irq_controller.h"
+#include "cam_vfe175.h"
+
+static struct cam_irq_register_set vfe480_bus_irq_reg[2] = {
+		{
+			.mask_reg_offset   = 0x0000AA18,
+			.clear_reg_offset  = 0x0000AA20,
+			.status_reg_offset = 0x0000AA28,
+		},
+		{
+			.mask_reg_offset   = 0x0000AA1C,
+			.clear_reg_offset  = 0x0000AA24,
+			.status_reg_offset = 0x0000AA2C,
+		},
+};
+
+static struct cam_vfe_bus_ver3_reg_offset_ubwc_client
+	vfe480_ubwc_regs_client_0 = {
+	.meta_addr        = 0x0000AC40,
+	.meta_cfg         = 0x0000AC44,
+	.mode_cfg         = 0x0000AC48,
+	.stats_ctrl       = 0x0000AC4C,
+	.ctrl_2           = 0x0000AC50,
+	.bw_limit         = 0x0000AC1C,
+};
+
+static struct cam_vfe_bus_ver3_reg_offset_ubwc_client
+	vfe480_ubwc_regs_client_1 = {
+	.meta_addr        = 0x0000AD40,
+	.meta_cfg         = 0x0000AD44,
+	.mode_cfg         = 0x0000AD48,
+	.stats_ctrl       = 0x0000AD4C,
+	.ctrl_2           = 0x0000AD50,
+	.bw_limit         = 0x0000AD1C,
+};
+
+static struct cam_vfe_bus_ver3_reg_offset_ubwc_client
+	vfe480_ubwc_regs_client_4 = {
+	.meta_addr        = 0x0000B040,
+	.meta_cfg         = 0x0000B044,
+	.mode_cfg         = 0x0000B048,
+	.stats_ctrl       = 0x0000B04C,
+	.ctrl_2           = 0x0000B050,
+	.bw_limit         = 0x0000B01C,
+};
+
+static struct cam_vfe_bus_ver3_reg_offset_ubwc_client
+	vfe480_ubwc_regs_client_5 = {
+	.meta_addr        = 0x0000B140,
+	.meta_cfg         = 0x0000B144,
+	.mode_cfg         = 0x0000B148,
+	.stats_ctrl       = 0x0000B14C,
+	.ctrl_2           = 0x0000B150,
+	.bw_limit         = 0x0000B11C,
+};
+
+static struct cam_vfe_bus_ver3_hw_info vfe480_bus_hw_info = {
+	.common_reg = {
+		.hw_version                       = 0x0000AA00,
+		.cgc_ovd                          = 0x0000AA08,
+		.comp_cfg_0                       = 0x0000AA0C,
+		.comp_cfg_1                       = 0x0000AA10,
+		.if_frameheader_cfg               = {
+			0x0000AA34,
+			0x0000AA38,
+			0x0000AA3C,
+			0x0000AA40,
+			0x0000AA44,
+			0x0000AA48,
+		},
+		.ubwc_static_ctrl                 = 0x0000AA58,
+		.pwr_iso_cfg                      = 0x0000AA5C,
+		.overflow_status_clear            = 0x0000AA60,
+		.ccif_violation_status            = 0x0000AA64,
+		.overflow_status                  = 0x0000AA68,
+		.image_size_violation_status      = 0x0000AA70,
+		.debug_status_cfg                 = 0x0000AAD4,
+		.debug_status_0                   = 0x0000AAD8,
+		.test_bus_ctrl                    = 0x0000AADC,
+		.irq_reg_info = {
+			.num_registers            = 2,
+			.irq_reg_set              = vfe480_bus_irq_reg,
+			.global_clear_offset      = 0x0000AA30,
+			.global_clear_bitmask     = 0x00000001,
+		},
+	},
+	.num_client = CAM_VFE_BUS_VER3_MAX_CLIENTS,
+	.bus_client_reg = {
+		/* BUS Client 0 FULL Y */
+		{
+			.cfg                      = 0x0000AC00,
+			.image_addr               = 0x0000AC04,
+			.frame_incr               = 0x0000AC08,
+			.image_cfg_0              = 0x0000AC0C,
+			.image_cfg_1              = 0x0000AC10,
+			.image_cfg_2              = 0x0000AC14,
+			.packer_cfg               = 0x0000AC18,
+			.frame_header_addr        = 0x0000AC20,
+			.frame_header_incr        = 0x0000AC24,
+			.frame_header_cfg         = 0x0000AC28,
+			.irq_subsample_period     = 0x0000AC30,
+			.irq_subsample_pattern    = 0x0000AC34,
+			.framedrop_period         = 0x0000AC38,
+			.framedrop_pattern        = 0x0000AC3C,
+			.system_cache_cfg         = 0x0000AC60,
+			.burst_limit              = 0x0000AC64,
+			.addr_status_0            = 0x0000AC68,
+			.addr_status_1            = 0x0000AC6C,
+			.addr_status_2            = 0x0000AC70,
+			.addr_status_3            = 0x0000AC74,
+			.debug_status_cfg         = 0x0000AC78,
+			.debug_status_0           = 0x0000AC7C,
+			.debug_status_1           = 0x0000AC80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_0,
+			.ubwc_regs                = &vfe480_ubwc_regs_client_0,
+		},
+		/* BUS Client 1 FULL C */
+		{
+			.cfg                      = 0x0000AD00,
+			.image_addr               = 0x0000AD04,
+			.frame_incr               = 0x0000AD08,
+			.image_cfg_0              = 0x0000AD0C,
+			.image_cfg_1              = 0x0000AD10,
+			.image_cfg_2              = 0x0000AD14,
+			.packer_cfg               = 0x0000AD18,
+			.frame_header_addr        = 0x0000AD20,
+			.frame_header_incr        = 0x0000AD24,
+			.frame_header_cfg         = 0x0000AD28,
+			.irq_subsample_period     = 0x0000AD30,
+			.irq_subsample_pattern    = 0x0000AD34,
+			.framedrop_period         = 0x0000AD38,
+			.framedrop_pattern        = 0x0000AD3C,
+			.system_cache_cfg         = 0x0000AD60,
+			.burst_limit              = 0x0000AD64,
+			.addr_status_0            = 0x0000AD68,
+			.addr_status_1            = 0x0000AD6C,
+			.addr_status_2            = 0x0000AD70,
+			.addr_status_3            = 0x0000AD74,
+			.debug_status_cfg         = 0x0000AD78,
+			.debug_status_0           = 0x0000AD7C,
+			.debug_status_1           = 0x0000AD80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_0,
+			.ubwc_regs                = &vfe480_ubwc_regs_client_1,
+		},
+		/* BUS Client 2 DS4 */
+		{
+			.cfg                      = 0x0000AE00,
+			.image_addr               = 0x0000AE04,
+			.frame_incr               = 0x0000AE08,
+			.image_cfg_0              = 0x0000AE0C,
+			.image_cfg_1              = 0x0000AE10,
+			.image_cfg_2              = 0x0000AE14,
+			.packer_cfg               = 0x0000AE18,
+			.frame_header_addr        = 0x0000AE20,
+			.frame_header_incr        = 0x0000AE24,
+			.frame_header_cfg         = 0x0000AE28,
+			.irq_subsample_period     = 0x0000AE30,
+			.irq_subsample_pattern    = 0x0000AE34,
+			.framedrop_period         = 0x0000AE38,
+			.framedrop_pattern        = 0x0000AE3C,
+			.system_cache_cfg         = 0x0000AE60,
+			.burst_limit              = 0x0000AE64,
+			.addr_status_0            = 0x0000AE68,
+			.addr_status_1            = 0x0000AE6C,
+			.addr_status_2            = 0x0000AE70,
+			.addr_status_3            = 0x0000AE74,
+			.debug_status_cfg         = 0x0000AE78,
+			.debug_status_0           = 0x0000AE7C,
+			.debug_status_1           = 0x0000AE80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_0,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 3 DS16 */
+		{
+			.cfg                      = 0x0000AF00,
+			.image_addr               = 0x0000AF04,
+			.frame_incr               = 0x0000AF08,
+			.image_cfg_0              = 0x0000AF0C,
+			.image_cfg_1              = 0x0000AF10,
+			.image_cfg_2              = 0x0000AF14,
+			.packer_cfg               = 0x0000AF18,
+			.frame_header_addr        = 0x0000AF20,
+			.frame_header_incr        = 0x0000AF24,
+			.frame_header_cfg         = 0x0000AF28,
+			.irq_subsample_period     = 0x0000AF30,
+			.irq_subsample_pattern    = 0x0000AF34,
+			.framedrop_period         = 0x0000AF38,
+			.framedrop_pattern        = 0x0000AF3C,
+			.system_cache_cfg         = 0x0000AF60,
+			.burst_limit              = 0x0000AF64,
+			.addr_status_0            = 0x0000AF68,
+			.addr_status_1            = 0x0000AF6C,
+			.addr_status_2            = 0x0000AF70,
+			.addr_status_3            = 0x0000AF74,
+			.debug_status_cfg         = 0x0000AF78,
+			.debug_status_0           = 0x0000AF7C,
+			.debug_status_1           = 0x0000AF80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_0,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 4 DISP Y */
+		{
+			.cfg                      = 0x0000B000,
+			.image_addr               = 0x0000B004,
+			.frame_incr               = 0x0000B008,
+			.image_cfg_0              = 0x0000B00C,
+			.image_cfg_1              = 0x0000B010,
+			.image_cfg_2              = 0x0000B014,
+			.packer_cfg               = 0x0000B018,
+			.frame_header_addr        = 0x0000B020,
+			.frame_header_incr        = 0x0000B024,
+			.frame_header_cfg         = 0x0000B028,
+			.irq_subsample_period     = 0x0000B030,
+			.irq_subsample_pattern    = 0x0000B034,
+			.framedrop_period         = 0x0000B038,
+			.framedrop_pattern        = 0x0000B03C,
+			.system_cache_cfg         = 0x0000B060,
+			.burst_limit              = 0x0000B064,
+			.addr_status_0            = 0x0000B068,
+			.addr_status_1            = 0x0000B06C,
+			.addr_status_2            = 0x0000B070,
+			.addr_status_3            = 0x0000B074,
+			.debug_status_cfg         = 0x0000B078,
+			.debug_status_0           = 0x0000B07C,
+			.debug_status_1           = 0x0000B080,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_1,
+			.ubwc_regs                = &vfe480_ubwc_regs_client_4,
+		},
+		/* BUS Client 5 DISP C */
+		{
+			.cfg                      = 0x0000B100,
+			.image_addr               = 0x0000B104,
+			.frame_incr               = 0x0000B108,
+			.image_cfg_0              = 0x0000B10C,
+			.image_cfg_1              = 0x0000B110,
+			.image_cfg_2              = 0x0000B114,
+			.packer_cfg               = 0x0000B118,
+			.frame_header_addr        = 0x0000B120,
+			.frame_header_incr        = 0x0000B124,
+			.frame_header_cfg         = 0x0000B128,
+			.irq_subsample_period     = 0x0000B130,
+			.irq_subsample_pattern    = 0x0000B134,
+			.framedrop_period         = 0x0000B138,
+			.framedrop_pattern        = 0x0000B13C,
+			.system_cache_cfg         = 0x0000B160,
+			.burst_limit              = 0x0000B164,
+			.addr_status_0            = 0x0000B168,
+			.addr_status_1            = 0x0000B16C,
+			.addr_status_2            = 0x0000B170,
+			.addr_status_3            = 0x0000B174,
+			.debug_status_cfg         = 0x0000B178,
+			.debug_status_0           = 0x0000B17C,
+			.debug_status_1           = 0x0000B180,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_1,
+			.ubwc_regs                = &vfe480_ubwc_regs_client_5,
+		},
+		/* BUS Client 6 DISP DS4 */
+		{
+			.cfg                      = 0x0000B200,
+			.image_addr               = 0x0000B204,
+			.frame_incr               = 0x0000B208,
+			.image_cfg_0              = 0x0000B20C,
+			.image_cfg_1              = 0x0000B210,
+			.image_cfg_2              = 0x0000B214,
+			.packer_cfg               = 0x0000B218,
+			.frame_header_addr        = 0x0000B220,
+			.frame_header_incr        = 0x0000B224,
+			.frame_header_cfg         = 0x0000B228,
+			.irq_subsample_period     = 0x0000B230,
+			.irq_subsample_pattern    = 0x0000B234,
+			.framedrop_period         = 0x0000B238,
+			.framedrop_pattern        = 0x0000B23C,
+			.system_cache_cfg         = 0x0000B260,
+			.burst_limit              = 0x0000B264,
+			.addr_status_0            = 0x0000B268,
+			.addr_status_1            = 0x0000B26C,
+			.addr_status_2            = 0x0000B270,
+			.addr_status_3            = 0x0000B274,
+			.debug_status_cfg         = 0x0000B278,
+			.debug_status_0           = 0x0000B27C,
+			.debug_status_1           = 0x0000B280,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_1,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 7 DISP DS16 */
+		{
+			.cfg                      = 0x0000B300,
+			.image_addr               = 0x0000B304,
+			.frame_incr               = 0x0000B308,
+			.image_cfg_0              = 0x0000B30C,
+			.image_cfg_1              = 0x0000B310,
+			.image_cfg_2              = 0x0000B314,
+			.packer_cfg               = 0x0000B318,
+			.frame_header_addr        = 0x0000B320,
+			.frame_header_incr        = 0x0000B324,
+			.frame_header_cfg         = 0x0000B328,
+			.irq_subsample_period     = 0x0000B330,
+			.irq_subsample_pattern    = 0x0000B334,
+			.framedrop_period         = 0x0000B338,
+			.framedrop_pattern        = 0x0000B33C,
+			.system_cache_cfg         = 0x0000B360,
+			.burst_limit              = 0x0000B364,
+			.addr_status_0            = 0x0000B368,
+			.addr_status_1            = 0x0000B36C,
+			.addr_status_2            = 0x0000B370,
+			.addr_status_3            = 0x0000B374,
+			.debug_status_cfg         = 0x0000B378,
+			.debug_status_0           = 0x0000B37C,
+			.debug_status_1           = 0x0000B380,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_1,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 8 FD Y */
+		{
+			.cfg                      = 0x0000B400,
+			.image_addr               = 0x0000B404,
+			.frame_incr               = 0x0000B408,
+			.image_cfg_0              = 0x0000B40C,
+			.image_cfg_1              = 0x0000B410,
+			.image_cfg_2              = 0x0000B414,
+			.packer_cfg               = 0x0000B418,
+			.frame_header_addr        = 0x0000B420,
+			.frame_header_incr        = 0x0000B424,
+			.frame_header_cfg         = 0x0000B428,
+			.irq_subsample_period     = 0x0000B430,
+			.irq_subsample_pattern    = 0x0000B434,
+			.framedrop_period         = 0x0000B438,
+			.framedrop_pattern        = 0x0000B43C,
+			.system_cache_cfg         = 0x0000B460,
+			.burst_limit              = 0x0000B464,
+			.addr_status_0            = 0x0000B468,
+			.addr_status_1            = 0x0000B46C,
+			.addr_status_2            = 0x0000B470,
+			.addr_status_3            = 0x0000B474,
+			.debug_status_cfg         = 0x0000B478,
+			.debug_status_0           = 0x0000B47C,
+			.debug_status_1           = 0x0000B480,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_2,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 9 FD C */
+		{
+			.cfg                      = 0x0000B500,
+			.image_addr               = 0x0000B504,
+			.frame_incr               = 0x0000B508,
+			.image_cfg_0              = 0x0000B50C,
+			.image_cfg_1              = 0x0000B510,
+			.image_cfg_2              = 0x0000B514,
+			.packer_cfg               = 0x0000B518,
+			.frame_header_addr        = 0x0000B520,
+			.frame_header_incr        = 0x0000B524,
+			.frame_header_cfg         = 0x0000B528,
+			.irq_subsample_period     = 0x0000B530,
+			.irq_subsample_pattern    = 0x0000B534,
+			.framedrop_period         = 0x0000B538,
+			.framedrop_pattern        = 0x0000B53C,
+			.system_cache_cfg         = 0x0000B560,
+			.burst_limit              = 0x0000B564,
+			.addr_status_0            = 0x0000B568,
+			.addr_status_1            = 0x0000B56C,
+			.addr_status_2            = 0x0000B570,
+			.addr_status_3            = 0x0000B574,
+			.debug_status_cfg         = 0x0000B578,
+			.debug_status_0           = 0x0000B57C,
+			.debug_status_1           = 0x0000B580,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_2,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 10 PIXEL RAW */
+		{
+			.cfg                      = 0x0000B600,
+			.image_addr               = 0x0000B604,
+			.frame_incr               = 0x0000B608,
+			.image_cfg_0              = 0x0000B60C,
+			.image_cfg_1              = 0x0000B610,
+			.image_cfg_2              = 0x0000B614,
+			.packer_cfg               = 0x0000B618,
+			.frame_header_addr        = 0x0000B620,
+			.frame_header_incr        = 0x0000B624,
+			.frame_header_cfg         = 0x0000B628,
+			.irq_subsample_period     = 0x0000B630,
+			.irq_subsample_pattern    = 0x0000B634,
+			.framedrop_period         = 0x0000B638,
+			.framedrop_pattern        = 0x0000B63C,
+			.system_cache_cfg         = 0x0000B660,
+			.burst_limit              = 0x0000B664,
+			.addr_status_0            = 0x0000B668,
+			.addr_status_1            = 0x0000B66C,
+			.addr_status_2            = 0x0000B670,
+			.addr_status_3            = 0x0000B674,
+			.debug_status_cfg         = 0x0000B678,
+			.debug_status_0           = 0x0000B67C,
+			.debug_status_1           = 0x0000B680,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_3,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 11 CAMIF PD */
+		{
+			.cfg                      = 0x0000B700,
+			.image_addr               = 0x0000B704,
+			.frame_incr               = 0x0000B708,
+			.image_cfg_0              = 0x0000B70C,
+			.image_cfg_1              = 0x0000B710,
+			.image_cfg_2              = 0x0000B714,
+			.packer_cfg               = 0x0000B718,
+			.frame_header_addr        = 0x0000B720,
+			.frame_header_incr        = 0x0000B724,
+			.frame_header_cfg         = 0x0000B728,
+			.irq_subsample_period     = 0x0000B730,
+			.irq_subsample_pattern    = 0x0000B734,
+			.framedrop_period         = 0x0000B738,
+			.framedrop_pattern        = 0x0000B73C,
+			.system_cache_cfg         = 0x0000B760,
+			.burst_limit              = 0x0000B764,
+			.addr_status_0            = 0x0000B768,
+			.addr_status_1            = 0x0000B76C,
+			.addr_status_2            = 0x0000B770,
+			.addr_status_3            = 0x0000B774,
+			.debug_status_cfg         = 0x0000B778,
+			.debug_status_0           = 0x0000B77C,
+			.debug_status_1           = 0x0000B780,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_4,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 12 STATS HDR BE */
+		{
+			.cfg                      = 0x0000B800,
+			.image_addr               = 0x0000B804,
+			.frame_incr               = 0x0000B808,
+			.image_cfg_0              = 0x0000B80C,
+			.image_cfg_1              = 0x0000B810,
+			.image_cfg_2              = 0x0000B814,
+			.packer_cfg               = 0x0000B818,
+			.frame_header_addr        = 0x0000B820,
+			.frame_header_incr        = 0x0000B824,
+			.frame_header_cfg         = 0x0000B828,
+			.irq_subsample_period     = 0x0000B830,
+			.irq_subsample_pattern    = 0x0000B834,
+			.framedrop_period         = 0x0000B838,
+			.framedrop_pattern        = 0x0000B83C,
+			.system_cache_cfg         = 0x0000B860,
+			.burst_limit              = 0x0000B864,
+			.addr_status_0            = 0x0000B868,
+			.addr_status_1            = 0x0000B86C,
+			.addr_status_2            = 0x0000B870,
+			.addr_status_3            = 0x0000B874,
+			.debug_status_cfg         = 0x0000B878,
+			.debug_status_0           = 0x0000B87C,
+			.debug_status_1           = 0x0000B880,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_5,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 13 STATS HDR BHIST */
+		{
+			.cfg                      = 0x0000B900,
+			.image_addr               = 0x0000B904,
+			.frame_incr               = 0x0000B908,
+			.image_cfg_0              = 0x0000B90C,
+			.image_cfg_1              = 0x0000B910,
+			.image_cfg_2              = 0x0000B914,
+			.packer_cfg               = 0x0000B918,
+			.frame_header_addr        = 0x0000B920,
+			.frame_header_incr        = 0x0000B924,
+			.frame_header_cfg         = 0x0000B928,
+			.irq_subsample_period     = 0x0000B930,
+			.irq_subsample_pattern    = 0x0000B934,
+			.framedrop_period         = 0x0000B938,
+			.framedrop_pattern        = 0x0000B93C,
+			.system_cache_cfg         = 0x0000B960,
+			.burst_limit              = 0x0000B964,
+			.addr_status_0            = 0x0000B968,
+			.addr_status_1            = 0x0000B96C,
+			.addr_status_2            = 0x0000B970,
+			.addr_status_3            = 0x0000B974,
+			.debug_status_cfg         = 0x0000B978,
+			.debug_status_0           = 0x0000B97C,
+			.debug_status_1           = 0x0000B980,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_5,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 14 STATS TINTLESS BG */
+		{
+			.cfg                      = 0x0000BA00,
+			.image_addr               = 0x0000BA04,
+			.frame_incr               = 0x0000BA08,
+			.image_cfg_0              = 0x0000BA0C,
+			.image_cfg_1              = 0x0000BA10,
+			.image_cfg_2              = 0x0000BA14,
+			.packer_cfg               = 0x0000BA18,
+			.frame_header_addr        = 0x0000BA20,
+			.frame_header_incr        = 0x0000BA24,
+			.frame_header_cfg         = 0x0000BA28,
+			.irq_subsample_period     = 0x0000BA30,
+			.irq_subsample_pattern    = 0x0000BA34,
+			.framedrop_period         = 0x0000BA38,
+			.framedrop_pattern        = 0x0000BA3C,
+			.system_cache_cfg         = 0x0000BA60,
+			.burst_limit              = 0x0000BA64,
+			.addr_status_0            = 0x0000BA68,
+			.addr_status_1            = 0x0000BA6C,
+			.addr_status_2            = 0x0000BA70,
+			.addr_status_3            = 0x0000BA74,
+			.debug_status_cfg         = 0x0000BA78,
+			.debug_status_0           = 0x0000BA7C,
+			.debug_status_1           = 0x0000BA80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_6,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 15 STATS AWB BG */
+		{
+			.cfg                      = 0x0000BB00,
+			.image_addr               = 0x0000BB04,
+			.frame_incr               = 0x0000BB08,
+			.image_cfg_0              = 0x0000BB0C,
+			.image_cfg_1              = 0x0000BB10,
+			.image_cfg_2              = 0x0000BB14,
+			.packer_cfg               = 0x0000BB18,
+			.frame_header_addr        = 0x0000BB20,
+			.frame_header_incr        = 0x0000BB24,
+			.frame_header_cfg         = 0x0000BB28,
+			.irq_subsample_period     = 0x0000BB30,
+			.irq_subsample_pattern    = 0x0000BB34,
+			.framedrop_period         = 0x0000BB38,
+			.framedrop_pattern        = 0x0000BB3C,
+			.system_cache_cfg         = 0x0000BB60,
+			.burst_limit              = 0x0000BB64,
+			.addr_status_0            = 0x0000BB68,
+			.addr_status_1            = 0x0000BB6C,
+			.addr_status_2            = 0x0000BB70,
+			.addr_status_3            = 0x0000BB74,
+			.debug_status_cfg         = 0x0000BB78,
+			.debug_status_0           = 0x0000BB7C,
+			.debug_status_1           = 0x0000BB80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_6,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 16 STATS BHIST */
+		{
+			.cfg                      = 0x0000BC00,
+			.image_addr               = 0x0000BC04,
+			.frame_incr               = 0x0000BC08,
+			.image_cfg_0              = 0x0000BC0C,
+			.image_cfg_1              = 0x0000BC10,
+			.image_cfg_2              = 0x0000BC14,
+			.packer_cfg               = 0x0000BC18,
+			.frame_header_addr        = 0x0000BC20,
+			.frame_header_incr        = 0x0000BC24,
+			.frame_header_cfg         = 0x0000BC28,
+			.irq_subsample_period     = 0x0000BC30,
+			.irq_subsample_pattern    = 0x0000BC34,
+			.framedrop_period         = 0x0000BC38,
+			.framedrop_pattern        = 0x0000BC3C,
+			.system_cache_cfg         = 0x0000BC60,
+			.burst_limit              = 0x0000BC64,
+			.addr_status_0            = 0x0000BC68,
+			.addr_status_1            = 0x0000BC6C,
+			.addr_status_2            = 0x0000BC70,
+			.addr_status_3            = 0x0000BC74,
+			.debug_status_cfg         = 0x0000BC78,
+			.debug_status_0           = 0x0000BC7C,
+			.debug_status_1           = 0x0000BC80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_7,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 17 STATS RS */
+		{
+			.cfg                      = 0x0000BD00,
+			.image_addr               = 0x0000BD04,
+			.frame_incr               = 0x0000BD08,
+			.image_cfg_0              = 0x0000BD0C,
+			.image_cfg_1              = 0x0000BD10,
+			.image_cfg_2              = 0x0000BD14,
+			.packer_cfg               = 0x0000BD18,
+			.frame_header_addr        = 0x0000BD20,
+			.frame_header_incr        = 0x0000BD24,
+			.frame_header_cfg         = 0x0000BD28,
+			.irq_subsample_period     = 0x0000BD30,
+			.irq_subsample_pattern    = 0x0000BD34,
+			.framedrop_period         = 0x0000BD38,
+			.framedrop_pattern        = 0x0000BD3C,
+			.system_cache_cfg         = 0x0000BD60,
+			.burst_limit              = 0x0000BD64,
+			.addr_status_0            = 0x0000BD68,
+			.addr_status_1            = 0x0000BD6C,
+			.addr_status_2            = 0x0000BD70,
+			.addr_status_3            = 0x0000BD74,
+			.debug_status_cfg         = 0x0000BD78,
+			.debug_status_0           = 0x0000BD7C,
+			.debug_status_1           = 0x0000BD80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_7,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 18 STATS CS */
+		{
+			.cfg                      = 0x0000BE00,
+			.image_addr               = 0x0000BE04,
+			.frame_incr               = 0x0000BE08,
+			.image_cfg_0              = 0x0000BE0C,
+			.image_cfg_1              = 0x0000BE10,
+			.image_cfg_2              = 0x0000BE14,
+			.packer_cfg               = 0x0000BE18,
+			.frame_header_addr        = 0x0000BE20,
+			.frame_header_incr        = 0x0000BE24,
+			.frame_header_cfg         = 0x0000BE28,
+			.irq_subsample_period     = 0x0000BE30,
+			.irq_subsample_pattern    = 0x0000BE34,
+			.framedrop_period         = 0x0000BE38,
+			.framedrop_pattern        = 0x0000BE3C,
+			.system_cache_cfg         = 0x0000BE60,
+			.burst_limit              = 0x0000BE64,
+			.addr_status_0            = 0x0000BE68,
+			.addr_status_1            = 0x0000BE6C,
+			.addr_status_2            = 0x0000BE70,
+			.addr_status_3            = 0x0000BE74,
+			.debug_status_cfg         = 0x0000BE78,
+			.debug_status_0           = 0x0000BE7C,
+			.debug_status_1           = 0x0000BE80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_7,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 19 STATS IHIST */
+		{
+			.cfg                      = 0x0000BF00,
+			.image_addr               = 0x0000BF04,
+			.frame_incr               = 0x0000BF08,
+			.image_cfg_0              = 0x0000BF0C,
+			.image_cfg_1              = 0x0000BF10,
+			.image_cfg_2              = 0x0000BF14,
+			.packer_cfg               = 0x0000BF18,
+			.frame_header_addr        = 0x0000BF20,
+			.frame_header_incr        = 0x0000BF24,
+			.frame_header_cfg         = 0x0000BF28,
+			.irq_subsample_period     = 0x0000BF30,
+			.irq_subsample_pattern    = 0x0000BF34,
+			.framedrop_period         = 0x0000BF38,
+			.framedrop_pattern        = 0x0000BF3C,
+			.system_cache_cfg         = 0x0000BF60,
+			.burst_limit              = 0x0000BF64,
+			.addr_status_0            = 0x0000BF68,
+			.addr_status_1            = 0x0000BF6C,
+			.addr_status_2            = 0x0000BF70,
+			.addr_status_3            = 0x0000BF74,
+			.debug_status_cfg         = 0x0000BF78,
+			.debug_status_0           = 0x0000BF7C,
+			.debug_status_1           = 0x0000BF80,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_7,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 20 STATS BF */
+		{
+			.cfg                      = 0x0000C000,
+			.image_addr               = 0x0000C004,
+			.frame_incr               = 0x0000C008,
+			.image_cfg_0              = 0x0000C00C,
+			.image_cfg_1              = 0x0000C010,
+			.image_cfg_2              = 0x0000C014,
+			.packer_cfg               = 0x0000C018,
+			.frame_header_addr        = 0x0000C020,
+			.frame_header_incr        = 0x0000C024,
+			.frame_header_cfg         = 0x0000C028,
+			.irq_subsample_period     = 0x0000C030,
+			.irq_subsample_pattern    = 0x0000C034,
+			.framedrop_period         = 0x0000C038,
+			.framedrop_pattern        = 0x0000C03C,
+			.system_cache_cfg         = 0x0000C060,
+			.burst_limit              = 0x0000C064,
+			.addr_status_0            = 0x0000C068,
+			.addr_status_1            = 0x0000C06C,
+			.addr_status_2            = 0x0000C070,
+			.addr_status_3            = 0x0000C074,
+			.debug_status_cfg         = 0x0000C078,
+			.debug_status_0           = 0x0000C07C,
+			.debug_status_1           = 0x0000C080,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_8,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 21 PDAF V2.0 */
+		{
+			.cfg                      = 0x0000C100,
+			.image_addr               = 0x0000C104,
+			.frame_incr               = 0x0000C108,
+			.image_cfg_0              = 0x0000C10C,
+			.image_cfg_1              = 0x0000C110,
+			.image_cfg_2              = 0x0000C114,
+			.packer_cfg               = 0x0000C118,
+			.frame_header_addr        = 0x0000C120,
+			.frame_header_incr        = 0x0000C124,
+			.frame_header_cfg         = 0x0000C128,
+			.irq_subsample_period     = 0x0000C130,
+			.irq_subsample_pattern    = 0x0000C134,
+			.framedrop_period         = 0x0000C138,
+			.framedrop_pattern        = 0x0000C13C,
+			.system_cache_cfg         = 0x0000C160,
+			.burst_limit              = 0x0000C164,
+			.addr_status_0            = 0x0000C168,
+			.addr_status_1            = 0x0000C16C,
+			.addr_status_2            = 0x0000C170,
+			.addr_status_3            = 0x0000C174,
+			.debug_status_cfg         = 0x0000C178,
+			.debug_status_0           = 0x0000C17C,
+			.debug_status_1           = 0x0000C180,
+			.comp_group               = CAM_VFE_BUS_VER3_COMP_GRP_9,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 22 LCR */
+		{
+			.cfg                      = 0x0000C200,
+			.image_addr               = 0x0000C204,
+			.frame_incr               = 0x0000C208,
+			.image_cfg_0              = 0x0000C20C,
+			.image_cfg_1              = 0x0000C210,
+			.image_cfg_2              = 0x0000C214,
+			.packer_cfg               = 0x0000C218,
+			.frame_header_addr        = 0x0000C220,
+			.frame_header_incr        = 0x0000C224,
+			.frame_header_cfg         = 0x0000C228,
+			.irq_subsample_period     = 0x0000C230,
+			.irq_subsample_pattern    = 0x0000C234,
+			.framedrop_period         = 0x0000C238,
+			.framedrop_pattern        = 0x0000C23C,
+			.system_cache_cfg         = 0x0000C260,
+			.burst_limit              = 0x0000C264,
+			.addr_status_0            = 0x0000C268,
+			.addr_status_1            = 0x0000C26C,
+			.addr_status_2            = 0x0000C270,
+			.addr_status_3            = 0x0000C274,
+			.debug_status_cfg         = 0x0000C278,
+			.debug_status_0           = 0x0000C27C,
+			.debug_status_1           = 0x0000C280,
+			.comp_group              = CAM_VFE_BUS_VER3_COMP_GRP_10,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 23 RDI0 */
+		{
+			.cfg                      = 0x0000C300,
+			.image_addr               = 0x0000C304,
+			.frame_incr               = 0x0000C308,
+			.image_cfg_0              = 0x0000C30C,
+			.image_cfg_1              = 0x0000C310,
+			.image_cfg_2              = 0x0000C314,
+			.packer_cfg               = 0x0000C318,
+			.frame_header_addr        = 0x0000C320,
+			.frame_header_incr        = 0x0000C324,
+			.frame_header_cfg         = 0x0000C328,
+			.irq_subsample_period     = 0x0000C330,
+			.irq_subsample_pattern    = 0x0000C334,
+			.framedrop_period         = 0x0000C338,
+			.framedrop_pattern        = 0x0000C33C,
+			.system_cache_cfg         = 0x0000C360,
+			.burst_limit              = 0x0000C364,
+			.addr_status_0            = 0x0000C368,
+			.addr_status_1            = 0x0000C36C,
+			.addr_status_2            = 0x0000C370,
+			.addr_status_3            = 0x0000C374,
+			.debug_status_cfg         = 0x0000C378,
+			.debug_status_0           = 0x0000C37C,
+			.debug_status_1           = 0x0000C380,
+			.comp_group              = CAM_VFE_BUS_VER3_COMP_GRP_11,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 24 RDI1 */
+		{
+			.cfg                      = 0x0000C400,
+			.image_addr               = 0x0000C404,
+			.frame_incr               = 0x0000C408,
+			.image_cfg_0              = 0x0000C40C,
+			.image_cfg_1              = 0x0000C410,
+			.image_cfg_2              = 0x0000C414,
+			.packer_cfg               = 0x0000C418,
+			.frame_header_addr        = 0x0000C420,
+			.frame_header_incr        = 0x0000C424,
+			.frame_header_cfg         = 0x0000C428,
+			.irq_subsample_period     = 0x0000C430,
+			.irq_subsample_pattern    = 0x0000C434,
+			.framedrop_period         = 0x0000C438,
+			.framedrop_pattern        = 0x0000C43C,
+			.system_cache_cfg         = 0x0000C460,
+			.burst_limit              = 0x0000C464,
+			.addr_status_0            = 0x0000C468,
+			.addr_status_1            = 0x0000C46C,
+			.addr_status_2            = 0x0000C470,
+			.addr_status_3            = 0x0000C474,
+			.debug_status_cfg         = 0x0000C478,
+			.debug_status_0           = 0x0000C47C,
+			.debug_status_1           = 0x0000C480,
+			.comp_group              = CAM_VFE_BUS_VER3_COMP_GRP_12,
+			.ubwc_regs                = NULL,
+		},
+		/* BUS Client 25 RDI2 */
+		{
+			.cfg                      = 0x0000C500,
+			.image_addr               = 0x0000C504,
+			.frame_incr               = 0x0000C508,
+			.image_cfg_0              = 0x0000C50C,
+			.image_cfg_1              = 0x0000C510,
+			.image_cfg_2              = 0x0000C514,
+			.packer_cfg               = 0x0000C518,
+			.frame_header_addr        = 0x0000C520,
+			.frame_header_incr        = 0x0000C524,
+			.frame_header_cfg         = 0x0000C528,
+			.irq_subsample_period     = 0x0000C530,
+			.irq_subsample_pattern    = 0x0000C534,
+			.framedrop_period         = 0x0000C538,
+			.framedrop_pattern        = 0x0000C53C,
+			.system_cache_cfg         = 0x0000C560,
+			.burst_limit              = 0x0000C564,
+			.addr_status_0            = 0x0000C568,
+			.addr_status_1            = 0x0000C56C,
+			.addr_status_2            = 0x0000C570,
+			.addr_status_3            = 0x0000C574,
+			.debug_status_cfg         = 0x0000C578,
+			.debug_status_0           = 0x0000C57C,
+			.debug_status_1           = 0x0000C580,
+			.comp_group              = CAM_VFE_BUS_VER3_COMP_GRP_13,
+			.ubwc_regs                = NULL,
+		},
+	},
+	.num_out = CAM_VFE_BUS_VER3_VFE_OUT_MAX,
+	.vfe_out_hw_info = {
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI0,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI1,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RDI2,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FULL,
+			.max_width     = 4096,
+			.max_height    = 4096,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS4,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS16,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FD,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_PDAF,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  =
+				CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP,
+			.max_width     = 4096,
+			.max_height    = 4096,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_2PD,
+			.max_width     = 1920,
+			.max_height    = 1080,
+		},
+		{
+			.vfe_out_type  = CAM_VFE_BUS_VER3_VFE_OUT_LCR,
+			.max_width     = -1,
+			.max_height    = -1,
+		},
+	},
+};
+
+struct cam_vfe_hw_info cam_vfe480_hw_info = {
+	.irq_reg_info                  = &vfe175_top_irq_reg_info,
+
+	.bus_version                   = CAM_VFE_BUS_VER_3_0,
+	.bus_hw_info                   = &vfe480_bus_hw_info,
+
+	.top_version                   = CAM_VFE_TOP_VER_2_0,
+	.top_hw_info                   = &vfe175_top_hw_info,
+
+	.camif_version                 = CAM_VFE_CAMIF_VER_2_0,
+	.camif_reg                     = &vfe175_camif_reg,
+
+	.camif_lite_version            = CAM_VFE_CAMIF_LITE_VER_2_0,
+	.camif_lite_reg                = &vfe175_camif_lite_reg,
+
+};
+
+#endif /* _CAM_VFE480_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
index 1555609..724f915 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/Makefile
@@ -13,4 +13,4 @@
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include
 ccflags-y += -Idrivers/media/platform/msm/camera/cam_cpas/include
 
-obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_bus.o cam_vfe_bus_ver2.o cam_vfe_bus_rd_ver1.o
+obj-$(CONFIG_SPECTRA_CAMERA) += cam_vfe_bus.o cam_vfe_bus_ver2.o cam_vfe_bus_rd_ver1.o cam_vfe_bus_ver3.o
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
index 99a9eef..067cf7c 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus.c
@@ -1,12 +1,13 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #include "cam_vfe_bus.h"
 #include "cam_vfe_bus_ver1.h"
 #include "cam_vfe_bus_ver2.h"
 #include "cam_vfe_bus_rd_ver1.h"
+#include "cam_vfe_bus_ver3.h"
 #include "cam_debug_util.h"
 
 int cam_vfe_bus_init(uint32_t          bus_version,
@@ -21,17 +22,27 @@
 
 	switch (bus_type) {
 	case BUS_TYPE_WR:
-		if (CAM_VFE_BUS_VER_2_0)
+		switch (bus_version) {
+		case CAM_VFE_BUS_VER_2_0:
 			rc = cam_vfe_bus_ver2_init(soc_info, hw_intf,
 				bus_hw_info, vfe_irq_controller, vfe_bus);
-		break;
+			break;
+		case CAM_VFE_BUS_VER_3_0:
+			rc = cam_vfe_bus_ver3_init(soc_info, hw_intf,
+				bus_hw_info, vfe_irq_controller, vfe_bus);
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Unsupported Bus Version %x",
+				bus_version);
+			break;
+		}
+		break;
 	case BUS_TYPE_RD:
 		/* Call vfe bus rd init function */
 		rc = cam_vfe_bus_rd_ver1_init(soc_info, hw_intf,
 			bus_hw_info, vfe_irq_controller, vfe_bus);
 		break;
 	default:
-		CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
+		CAM_ERR(CAM_ISP, "Unsupported Bus type %x", bus_type);
 		break;
 	}
 
@@ -47,6 +58,9 @@
 	case CAM_VFE_BUS_VER_2_0:
 		rc = cam_vfe_bus_ver2_deinit(vfe_bus);
 		break;
+	case CAM_VFE_BUS_VER_3_0:
+		rc = cam_vfe_bus_ver3_deinit(vfe_bus);
+		break;
 	default:
 		CAM_ERR(CAM_ISP, "Unsupported Bus Version %x", bus_version);
 		break;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
index 33ec7a0..a44469d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver2.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/ratelimit.h>
@@ -38,21 +38,12 @@
 #define CAM_VFE_BUS_ADDR_NO_SYNC_DEFAULT_VAL \
 	((1 << CAM_VFE_BUS_VER2_MAX_CLIENTS) - 1)
 
-#define ALIGNUP(value, alignment) \
-	((value + alignment - 1) / alignment * alignment)
-
 #define MAX_BUF_UPDATE_REG_NUM   \
 	((sizeof(struct cam_vfe_bus_ver2_reg_offset_bus_client) +  \
 	sizeof(struct cam_vfe_bus_ver2_reg_offset_ubwc_client))/4)
 #define MAX_REG_VAL_PAIR_SIZE    \
 	(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
 
-#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val)    \
-	do {                                               \
-		buf_array[(index)++] = offset;             \
-		buf_array[(index)++] = val;                \
-	} while (0)
-
 static uint32_t bus_error_irq_mask[3] = {
 	0x7800,
 	0x0000,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
new file mode 100644
index 0000000..999a656
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
@@ -0,0 +1,3107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+
+#include <linux/ratelimit.h>
+#include <linux/slab.h>
+#include <uapi/media/cam_isp.h>
+#include "cam_io_util.h"
+#include "cam_debug_util.h"
+#include "cam_cdm_util.h"
+#include "cam_hw_intf.h"
+#include "cam_ife_hw_mgr.h"
+#include "cam_vfe_hw_intf.h"
+#include "cam_irq_controller.h"
+#include "cam_tasklet_util.h"
+#include "cam_vfe_bus.h"
+#include "cam_vfe_bus_ver3.h"
+#include "cam_vfe_core.h"
+#include "cam_cpas_api.h"
+
+static const char drv_name[] = "vfe_bus";
+
+#define CAM_VFE_BUS_VER3_IRQ_REG0                0
+#define CAM_VFE_BUS_VER3_IRQ_REG1                1
+#define CAM_VFE_BUS_VER3_IRQ_MAX                 2
+
+#define CAM_VFE_BUS_VER3_PAYLOAD_MAX             256
+
+#define CAM_VFE_RDI_BUS_DEFAULT_WIDTH               0xFFFF
+#define CAM_VFE_RDI_BUS_DEFAULT_STRIDE              0xFFFF
+#define CAM_VFE_BUS_VER3_INTRA_CLIENT_MASK          0x3
+
+#define MAX_BUF_UPDATE_REG_NUM   \
+	((sizeof(struct cam_vfe_bus_ver3_reg_offset_bus_client) +  \
+	sizeof(struct cam_vfe_bus_ver3_reg_offset_ubwc_client))/4)
+#define MAX_REG_VAL_PAIR_SIZE    \
+	(MAX_BUF_UPDATE_REG_NUM * 2 * CAM_PACKET_MAX_PLANES)
+
+static uint32_t bus_error_irq_mask[2] = {
+	0xC0000000,
+	0x00000000,
+};
+
+enum cam_vfe_bus_ver3_packer_format {
+	PACKER_FMT_VER3_PLAIN_128,
+	PACKER_FMT_VER3_PLAIN_8,
+	PACKER_FMT_VER3_PLAIN_8_ODD_EVEN,
+	PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10,
+	PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10_ODD_EVEN,
+	PACKER_FMT_VER3_PLAIN_16_10BPP,
+	PACKER_FMT_VER3_PLAIN_16_12BPP,
+	PACKER_FMT_VER3_PLAIN_16_14BPP,
+	PACKER_FMT_VER3_PLAIN_16_16BPP,
+	PACKER_FMT_VER3_PLAIN_32,
+	PACKER_FMT_VER3_PLAIN_64,
+	PACKER_FMT_VER3_TP_10,
+	PACKER_FMT_VER3_MAX,
+};
+
+struct cam_vfe_bus_ver3_common_data {
+	uint32_t                                    core_index;
+	void __iomem                               *mem_base;
+	struct cam_hw_intf                         *hw_intf;
+	void                                       *bus_irq_controller;
+	void                                       *vfe_irq_controller;
+	struct cam_vfe_bus_ver3_reg_offset_common  *common_reg;
+	uint32_t                                    io_buf_update[
+		MAX_REG_VAL_PAIR_SIZE];
+
+	struct cam_vfe_bus_irq_evt_payload          evt_payload[
+		CAM_VFE_BUS_VER3_PAYLOAD_MAX];
+	struct list_head                            free_payload_list;
+	spinlock_t                                  spin_lock;
+	struct mutex                                bus_mutex;
+	uint32_t                                    secure_mode;
+	uint32_t                                    num_sec_out;
+	uint32_t                                    addr_no_sync;
+};
+
+struct cam_vfe_bus_ver3_wm_resource_data {
+	uint32_t             index;
+	struct cam_vfe_bus_ver3_common_data            *common_data;
+	struct cam_vfe_bus_ver3_reg_offset_bus_client  *hw_regs;
+	void                                           *ctx;
+
+	bool                 init_cfg_done;
+	bool                 hfr_cfg_done;
+
+	uint32_t             offset;
+	uint32_t             width;
+	uint32_t             height;
+	uint32_t             stride;
+	uint32_t             format;
+	enum cam_vfe_bus_ver3_packer_format pack_fmt;
+
+	uint32_t             burst_len;
+
+	uint32_t             en_ubwc;
+	bool                 ubwc_updated;
+	uint32_t             packer_cfg;
+	uint32_t             h_init;
+	uint32_t             ubwc_meta_addr;
+	uint32_t             ubwc_meta_cfg;
+	uint32_t             ubwc_mode_cfg;
+	uint32_t             ubwc_stats_ctrl;
+	uint32_t             ubwc_ctrl_2;
+
+	uint32_t             irq_subsample_period;
+	uint32_t             irq_subsample_pattern;
+	uint32_t             framedrop_period;
+	uint32_t             framedrop_pattern;
+
+	uint32_t             en_cfg;
+	uint32_t             is_dual;
+};
+
+struct cam_vfe_bus_ver3_comp_grp_data {
+	enum cam_vfe_bus_ver3_comp_grp_type          comp_grp_type;
+	struct cam_vfe_bus_ver3_common_data         *common_data;
+	struct cam_vfe_bus_ver3_reg_offset_comp_grp *hw_regs;
+
+	uint32_t                                     irq_enabled;
+
+	uint32_t                                     is_master;
+	uint32_t                                     is_dual;
+	uint32_t                                     dual_slave_core;
+	uint32_t                                     intra_client_mask;
+	uint32_t                                     addr_sync_mode;
+
+	uint32_t                                     acquire_dev_cnt;
+	uint32_t                                     irq_trigger_cnt;
+
+	void                                        *ctx;
+};
+
+struct cam_vfe_bus_ver3_vfe_out_data {
+	uint32_t                              out_type;
+	struct cam_vfe_bus_ver3_common_data  *common_data;
+
+	uint32_t                         num_wm;
+	struct cam_isp_resource_node    *wm_res[PLANE_MAX];
+
+	struct cam_isp_resource_node    *comp_grp;
+	enum cam_isp_hw_sync_mode        dual_comp_sync_mode;
+	uint32_t                         dual_hw_alternate_vfe_id;
+	struct list_head                 vfe_out_list;
+
+	uint32_t                         format;
+	uint32_t                         max_width;
+	uint32_t                         max_height;
+	struct cam_cdm_utils_ops        *cdm_util_ops;
+	uint32_t                         secure_mode;
+};
+
+struct cam_vfe_bus_ver3_priv {
+	struct cam_vfe_bus_ver3_common_data common_data;
+	uint32_t                            num_client;
+	uint32_t                            num_out;
+
+	struct cam_isp_resource_node  bus_client[CAM_VFE_BUS_VER3_MAX_CLIENTS];
+	struct cam_isp_resource_node  comp_grp[CAM_VFE_BUS_VER3_COMP_GRP_MAX];
+	struct cam_isp_resource_node  vfe_out[CAM_VFE_BUS_VER3_VFE_OUT_MAX];
+
+	struct list_head                    free_comp_grp;
+	struct list_head                    used_comp_grp;
+
+	uint32_t                            irq_handle;
+	uint32_t                            error_irq_handle;
+	void                               *tasklet_info;
+};
+
+static int cam_vfe_bus_ver3_process_cmd(
+	struct cam_isp_resource_node *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size);
+
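+/*
+ * Fetch a free IRQ event payload from the bus common free list.
+ * Returns -ENODEV if none is available; the payload must be returned
+ * via cam_vfe_bus_ver3_put_evt_payload() once the event is handled.
+ */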
+static int cam_vfe_bus_ver3_get_evt_payload(
+	struct cam_vfe_bus_ver3_common_data  *common_data,
+	struct cam_vfe_bus_irq_evt_payload  **evt_payload)
+{
+	int rc;
+
+	spin_lock(&common_data->spin_lock);
+	if (list_empty(&common_data->free_payload_list)) {
+		*evt_payload = NULL;
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No free payload");
+		rc = -ENODEV;
+		goto done;
+	}
+
+	*evt_payload = list_first_entry(&common_data->free_payload_list,
+		struct cam_vfe_bus_irq_evt_payload, list);
+	list_del_init(&(*evt_payload)->list);
+	rc = 0;
+done:
+	spin_unlock(&common_data->spin_lock);
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_put_evt_payload(void     *core_info,
+	struct cam_vfe_bus_irq_evt_payload     **evt_payload)
+{
+	struct cam_vfe_bus_ver3_common_data *common_data = NULL;
+	uint32_t  *ife_irq_regs = NULL;
+	uint32_t   status_reg0, status_reg1;
+	unsigned long flags;
+
+	if (!core_info) {
+		CAM_ERR(CAM_ISP, "Invalid param core_info NULL");
+		return -EINVAL;
+	}
+	if (*evt_payload == NULL) {
+		CAM_ERR(CAM_ISP, "No payload to put");
+		return -EINVAL;
+	}
+	(*evt_payload)->error_type = 0;
+	ife_irq_regs = (*evt_payload)->irq_reg_val;
+	status_reg0 = ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
+	status_reg1 = ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS1];
+
+	if (status_reg0 || status_reg1) {
+		CAM_DBG(CAM_ISP, "status0 0x%x status1 0x%x",
+			status_reg0, status_reg1);
+		return 0;
+	}
+
+	common_data = core_info;
+
+	spin_lock_irqsave(&common_data->spin_lock, flags);
+	list_add_tail(&(*evt_payload)->list,
+		&common_data->free_payload_list);
+	spin_unlock_irqrestore(&common_data->spin_lock, flags);
+
+	*evt_payload = NULL;
+
+	CAM_DBG(CAM_ISP, "Done");
+	return 0;
+}
+
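+/*
+ * Compute the intra-client mask used when two VFE cores operate in
+ * dual mode; master and slave must be different cores (0 and 1).
+ */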
+static int cam_vfe_bus_ver3_get_intra_client_mask(
+	enum cam_vfe_bus_ver3_vfe_core_id  dual_slave_core,
+	enum cam_vfe_bus_ver3_vfe_core_id  current_core,
+	uint32_t                          *intra_client_mask)
+{
+	int rc = 0;
+	uint32_t version_based_intra_client_mask = 0x1;
+
+	*intra_client_mask = 0;
+
+	if (dual_slave_core == current_core) {
+		CAM_ERR(CAM_ISP,
+			"Invalid params. Same core as Master and Slave");
+		return -EINVAL;
+	}
+
+	switch (current_core) {
+	case CAM_VFE_BUS_VER3_VFE_CORE_0:
+		switch (dual_slave_core) {
+		case CAM_VFE_BUS_VER3_VFE_CORE_1:
+			*intra_client_mask = version_based_intra_client_mask;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
+				dual_slave_core);
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_CORE_1:
+		switch (dual_slave_core) {
+		case CAM_VFE_BUS_VER3_VFE_CORE_0:
+			*intra_client_mask = version_based_intra_client_mask;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid value for slave core %u",
+				dual_slave_core);
+			rc = -EINVAL;
+			break;
+		}
+		break;
+	default:
+		CAM_ERR(CAM_ISP,
+			"Invalid value for master core %u", current_core);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
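+/*
+ * Image outputs (FULL/DS/FD/RAW_DUMP/RDI/DISP) may be configured as
+ * secure; stats, PDAF, 2PD and LCR outputs are always non-secure.
+ */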
+static bool cam_vfe_bus_ver3_can_be_secure(uint32_t out_type)
+{
+	switch (out_type) {
+	case CAM_VFE_BUS_VER3_VFE_OUT_FULL:
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS4:
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS16:
+	case CAM_VFE_BUS_VER3_VFE_OUT_FD:
+	case CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP:
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI0:
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI1:
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI2:
+	case CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP:
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP:
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP:
+		return true;
+
+	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
+	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
+	case CAM_VFE_BUS_VER3_VFE_OUT_LCR:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST:
+	default:
+		return false;
+	}
+}
+
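+/* Map a CAM_ISP_IFE_OUT_RES_* resource type to the ver3 bus out type */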
+static enum cam_vfe_bus_ver3_vfe_out_type
+	cam_vfe_bus_ver3_get_out_res_id(uint32_t res_type)
+{
+	switch (res_type) {
+	case CAM_ISP_IFE_OUT_RES_FULL:
+		return CAM_VFE_BUS_VER3_VFE_OUT_FULL;
+	case CAM_ISP_IFE_OUT_RES_DS4:
+		return CAM_VFE_BUS_VER3_VFE_OUT_DS4;
+	case CAM_ISP_IFE_OUT_RES_DS16:
+		return CAM_VFE_BUS_VER3_VFE_OUT_DS16;
+	case CAM_ISP_IFE_OUT_RES_FD:
+		return CAM_VFE_BUS_VER3_VFE_OUT_FD;
+	case CAM_ISP_IFE_OUT_RES_RAW_DUMP:
+		return CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP;
+	case CAM_ISP_IFE_OUT_RES_PDAF:
+		return CAM_VFE_BUS_VER3_VFE_OUT_PDAF;
+	case CAM_ISP_IFE_OUT_RES_2PD:
+		return CAM_VFE_BUS_VER3_VFE_OUT_2PD;
+	case CAM_ISP_IFE_OUT_RES_RDI_0:
+		return CAM_VFE_BUS_VER3_VFE_OUT_RDI0;
+	case CAM_ISP_IFE_OUT_RES_RDI_1:
+		return CAM_VFE_BUS_VER3_VFE_OUT_RDI1;
+	case CAM_ISP_IFE_OUT_RES_RDI_2:
+		return CAM_VFE_BUS_VER3_VFE_OUT_RDI2;
+	case CAM_ISP_IFE_OUT_RES_RDI_3:
+		return CAM_VFE_BUS_VER3_VFE_OUT_RDI3;
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BE:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE;
+	case CAM_ISP_IFE_OUT_RES_STATS_HDR_BHIST:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST;
+	case CAM_ISP_IFE_OUT_RES_STATS_TL_BG:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG;
+	case CAM_ISP_IFE_OUT_RES_STATS_BF:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF;
+	case CAM_ISP_IFE_OUT_RES_STATS_AWB_BG:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG;
+	case CAM_ISP_IFE_OUT_RES_STATS_BHIST:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST;
+	case CAM_ISP_IFE_OUT_RES_STATS_RS:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS;
+	case CAM_ISP_IFE_OUT_RES_STATS_CS:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS;
+	case CAM_ISP_IFE_OUT_RES_STATS_IHIST:
+		return CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST;
+	case CAM_ISP_IFE_OUT_RES_FULL_DISP:
+		return CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP;
+	case CAM_ISP_IFE_OUT_RES_DS4_DISP:
+		return CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP;
+	case CAM_ISP_IFE_OUT_RES_DS16_DISP:
+		return CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP;
+	case CAM_ISP_IFE_OUT_RES_LCR:
+		return CAM_VFE_BUS_VER3_VFE_OUT_LCR;
+	default:
+		return CAM_VFE_BUS_VER3_VFE_OUT_MAX;
+	}
+}
+
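+/*
+ * Number of write master clients required for a given out resource
+ * and format, e.g. two WMs for NV12 (Y and C planes), one for RDI.
+ */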
+static int cam_vfe_bus_ver3_get_num_wm(
+	enum cam_vfe_bus_ver3_vfe_out_type    res_type,
+	uint32_t                              format)
+{
+	switch (res_type) {
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI0:
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI1:
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI2:
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI3:
+		switch (format) {
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+		case CAM_FORMAT_MIPI_RAW_14:
+		case CAM_FORMAT_MIPI_RAW_16:
+		case CAM_FORMAT_MIPI_RAW_20:
+		case CAM_FORMAT_DPCM_10_6_10:
+		case CAM_FORMAT_DPCM_10_8_10:
+		case CAM_FORMAT_DPCM_12_6_12:
+		case CAM_FORMAT_DPCM_12_8_12:
+		case CAM_FORMAT_DPCM_14_8_14:
+		case CAM_FORMAT_DPCM_14_10_14:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+		case CAM_FORMAT_PLAIN32_20:
+		case CAM_FORMAT_PLAIN128:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_FULL:
+	case CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP:
+		switch (format) {
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_TP10:
+		case CAM_FORMAT_UBWC_NV12:
+		case CAM_FORMAT_UBWC_NV12_4R:
+		case CAM_FORMAT_UBWC_TP10:
+		case CAM_FORMAT_UBWC_P010:
+		case CAM_FORMAT_PLAIN16_10:
+			return 2;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_FD:
+		switch (format) {
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_TP10:
+		case CAM_FORMAT_PLAIN16_10:
+			return 2;
+		case CAM_FORMAT_Y_ONLY:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS4:
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP:
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS16:
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP:
+		switch (format) {
+		case CAM_FORMAT_PD8:
+		case CAM_FORMAT_PD10:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP:
+		switch (format) {
+		case CAM_FORMAT_ARGB_14:
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
+		switch (format) {
+		case CAM_FORMAT_PLAIN8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
+		switch (format) {
+		case CAM_FORMAT_PLAIN16_8:
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+		case CAM_FORMAT_PLAIN64:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS:
+		switch (format) {
+		case CAM_FORMAT_PLAIN64:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS:
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST:
+		switch (format) {
+		case CAM_FORMAT_PLAIN16_16:
+			return 1;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_LCR:
+		return 1;
+	default:
+		break;
+	}
+
+	CAM_ERR(CAM_ISP, "Unsupported format %u for resource_type %u",
+		format, res_type);
+
+	return -EINVAL;
+}
+
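+/*
+ * Fixed mapping from (vfe out resource, plane) to WM client index.
+ * RDI0-2 use WM 23-25 when hw_idx < 2 and WM 0-2 otherwise; returns
+ * -1 for an unsupported combination.
+ */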
+static int cam_vfe_bus_ver3_get_wm_idx(
+	enum cam_vfe_bus_ver3_vfe_out_type vfe_out_res_id,
+	enum cam_vfe_bus_plane_type plane,
+	uint32_t hw_idx)
+{
+	int wm_idx = -1;
+
+	switch (vfe_out_res_id) {
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI0:
+		switch (plane) {
+		case PLANE_Y:
+			if (hw_idx < 2)
+				wm_idx = 23;
+			else
+				wm_idx = 0;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI1:
+		switch (plane) {
+		case PLANE_Y:
+			if (hw_idx < 2)
+				wm_idx = 24;
+			else
+				wm_idx = 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI2:
+		switch (plane) {
+		case PLANE_Y:
+			if (hw_idx < 2)
+				wm_idx = 25;
+			else
+				wm_idx = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_RDI3:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 3;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_FULL:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 0;
+			break;
+		case PLANE_C:
+			wm_idx = 1;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS4:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS16:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 3;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_FD:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 8;
+			break;
+		case PLANE_C:
+			wm_idx = 9;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 10;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_PDAF:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 21;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_2PD:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 11;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 12;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 13;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 14;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 20;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 15;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 16;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 17;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 18;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 19;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 4;
+			break;
+		case PLANE_C:
+			wm_idx = 5;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 6;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 7;
+			break;
+		default:
+			break;
+		}
+		break;
+	case CAM_VFE_BUS_VER3_VFE_OUT_LCR:
+		switch (plane) {
+		case PLANE_Y:
+			wm_idx = 22;
+			break;
+		default:
+			break;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return wm_idx;
+}
+
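+/* Select the bus packer format for a given output format and WM index */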
+static enum cam_vfe_bus_ver3_packer_format
+	cam_vfe_bus_ver3_get_packer_fmt(uint32_t out_fmt, int wm_index)
+{
+	switch (out_fmt) {
+	case CAM_FORMAT_MIPI_RAW_6:
+	case CAM_FORMAT_MIPI_RAW_8:
+	case CAM_FORMAT_MIPI_RAW_10:
+	case CAM_FORMAT_MIPI_RAW_12:
+	case CAM_FORMAT_MIPI_RAW_14:
+	case CAM_FORMAT_MIPI_RAW_16:
+	case CAM_FORMAT_MIPI_RAW_20:
+	case CAM_FORMAT_PLAIN16_8:
+	case CAM_FORMAT_PLAIN128:
+	case CAM_FORMAT_PD8:
+		return PACKER_FMT_VER3_PLAIN_128;
+	case CAM_FORMAT_PLAIN8:
+		return PACKER_FMT_VER3_PLAIN_8;
+	case CAM_FORMAT_NV21:
+		if ((wm_index == 1) || (wm_index == 3) || (wm_index == 5))
+			return PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10_ODD_EVEN;
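+		/* else fall through to the NV12 packer format */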
+	case CAM_FORMAT_NV12:
+	case CAM_FORMAT_UBWC_NV12:
+	case CAM_FORMAT_UBWC_NV12_4R:
+	case CAM_FORMAT_Y_ONLY:
+		return PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10;
+	case CAM_FORMAT_PLAIN16_10:
+		return PACKER_FMT_VER3_PLAIN_16_10BPP;
+	case CAM_FORMAT_PLAIN16_12:
+		return PACKER_FMT_VER3_PLAIN_16_12BPP;
+	case CAM_FORMAT_PLAIN16_14:
+		return PACKER_FMT_VER3_PLAIN_16_14BPP;
+	case CAM_FORMAT_PLAIN16_16:
+		return PACKER_FMT_VER3_PLAIN_16_16BPP;
+	case CAM_FORMAT_PLAIN32:
+		return PACKER_FMT_VER3_PLAIN_32;
+	case CAM_FORMAT_PLAIN64:
+	case CAM_FORMAT_ARGB_16:
+	case CAM_FORMAT_PD10:
+		return PACKER_FMT_VER3_PLAIN_64;
+	case CAM_FORMAT_UBWC_TP10:
+	case CAM_FORMAT_TP10:
+		return PACKER_FMT_VER3_TP_10;
+	default:
+		return PACKER_FMT_VER3_MAX;
+	}
+}
+
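+/*
+ * Reserve the WM client for the given out resource/plane and set its
+ * per-format defaults (width, height, stride, packer and enable cfg).
+ */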
+static int cam_vfe_bus_ver3_acquire_wm(
+	struct cam_vfe_bus_ver3_priv          *ver3_bus_priv,
+	struct cam_isp_out_port_info          *out_port_info,
+	void                                  *tasklet,
+	void                                  *ctx,
+	enum cam_vfe_bus_ver3_vfe_out_type     vfe_out_res_id,
+	enum cam_vfe_bus_plane_type            plane,
+	struct cam_isp_resource_node         **wm_res,
+	uint32_t                               is_dual,
+	enum cam_vfe_bus_ver3_comp_grp_type   *comp_grp_id,
+	uint32_t                               hw_idx)
+{
+	int32_t wm_idx = 0;
+	struct cam_isp_resource_node              *wm_res_local = NULL;
+	struct cam_vfe_bus_ver3_wm_resource_data  *rsrc_data = NULL;
+
+	*wm_res = NULL;
+
+	/* No need to allocate for BUS VER3. VFE OUT to WM is fixed. */
+	wm_idx = cam_vfe_bus_ver3_get_wm_idx(vfe_out_res_id, plane, hw_idx);
+	if (wm_idx < 0 || wm_idx >= ver3_bus_priv->num_client) {
+		CAM_ERR(CAM_ISP, "Unsupported VFE out %d plane %d",
+			vfe_out_res_id, plane);
+		return -EINVAL;
+	}
+
+	wm_res_local = &ver3_bus_priv->bus_client[wm_idx];
+	if (wm_res_local->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "WM:%d not available state:%d",
+			wm_idx, wm_res_local->res_state);
+		return -EALREADY;
+	}
+	wm_res_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	wm_res_local->tasklet_info = tasklet;
+
+	rsrc_data = wm_res_local->res_priv;
+	rsrc_data->ctx = ctx;
+	rsrc_data->format = out_port_info->format;
+	rsrc_data->pack_fmt = cam_vfe_bus_ver3_get_packer_fmt(rsrc_data->format,
+		wm_idx);
+
+	rsrc_data->width = out_port_info->width;
+	rsrc_data->height = out_port_info->height;
+	rsrc_data->is_dual = is_dual;
+	/* Set WM offset value to default */
+	rsrc_data->offset  = 0;
+	CAM_DBG(CAM_ISP, "WM:%d width %d height %d", rsrc_data->index,
+		rsrc_data->width, rsrc_data->height);
+
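+	/* On IFE lite (hw_idx >= 2) every WM is an RDI client */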
+	if (hw_idx > 1)
+		goto rdi_config;
+
+	if (rsrc_data->index > 22) {
+rdi_config:
+		/* WM 23-25 map to RDI0/RDI1/RDI2 */
+		switch (rsrc_data->format) {
+		case CAM_FORMAT_MIPI_RAW_6:
+		case CAM_FORMAT_MIPI_RAW_8:
+		case CAM_FORMAT_MIPI_RAW_10:
+		case CAM_FORMAT_MIPI_RAW_12:
+		case CAM_FORMAT_MIPI_RAW_14:
+		case CAM_FORMAT_MIPI_RAW_16:
+		case CAM_FORMAT_MIPI_RAW_20:
+		case CAM_FORMAT_PLAIN128:
+			rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+			rsrc_data->height = 0;
+			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
+			rsrc_data->pack_fmt = 0x0;
+			rsrc_data->en_cfg = 0x3;
+			break;
+		case CAM_FORMAT_PLAIN8:
+			rsrc_data->en_cfg = 0x1;
+			rsrc_data->pack_fmt = 0x1;
+			rsrc_data->width = rsrc_data->width * 2;
+			rsrc_data->stride = rsrc_data->width;
+			break;
+		case CAM_FORMAT_PLAIN16_10:
+		case CAM_FORMAT_PLAIN16_12:
+		case CAM_FORMAT_PLAIN16_14:
+		case CAM_FORMAT_PLAIN16_16:
+		case CAM_FORMAT_PLAIN32_20:
+			rsrc_data->width = CAM_VFE_RDI_BUS_DEFAULT_WIDTH;
+			rsrc_data->height = 0;
+			rsrc_data->stride = CAM_VFE_RDI_BUS_DEFAULT_STRIDE;
+			rsrc_data->pack_fmt = 0x0;
+			rsrc_data->en_cfg = 0x3;
+			break;
+		case CAM_FORMAT_PLAIN64:
+			rsrc_data->en_cfg = 0x1;
+			rsrc_data->pack_fmt = 0xA;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Unsupported RDI format %d",
+				rsrc_data->format);
+			return -EINVAL;
+		}
+	} else if ((rsrc_data->index < 2) ||
+		(rsrc_data->index == 8) || (rsrc_data->index == 9) ||
+		(rsrc_data->index == 4) || (rsrc_data->index == 5)) {
+		/*
+		 * WM 0-1 FULL_OUT, WM 8-9 FD_OUT,
+		 * WM 4-5 FULL_DISP
+		 */
+		switch (rsrc_data->format) {
+		case CAM_FORMAT_UBWC_NV12_4R:
+			rsrc_data->en_ubwc = 1;
+			rsrc_data->width = ALIGNUP(rsrc_data->width, 64);
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			break;
+		case CAM_FORMAT_UBWC_NV12:
+			rsrc_data->en_ubwc = 1;
+			/* Fall through for NV12 */
+		case CAM_FORMAT_NV21:
+		case CAM_FORMAT_NV12:
+		case CAM_FORMAT_Y_ONLY:
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			break;
+		case CAM_FORMAT_UBWC_TP10:
+			rsrc_data->en_ubwc = 1;
+			rsrc_data->width =
+				ALIGNUP(rsrc_data->width, 48) * 4 / 3;
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			break;
+		case CAM_FORMAT_TP10:
+			rsrc_data->width =
+				ALIGNUP(rsrc_data->width, 3) * 4 / 3;
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			break;
+		case CAM_FORMAT_PLAIN16_10:
+			switch (plane) {
+			case PLANE_C:
+				rsrc_data->height /= 2;
+				break;
+			case PLANE_Y:
+				break;
+			default:
+				CAM_ERR(CAM_ISP, "Invalid plane %d", plane);
+				return -EINVAL;
+			}
+			rsrc_data->width *= 2;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid format %d",
+				rsrc_data->format);
+			return -EINVAL;
+		}
+		rsrc_data->en_cfg = 0x1;
+	} else if (rsrc_data->index > 11 && rsrc_data->index < 21) {
+		/* WM 12-20 stats */
+		rsrc_data->width = 0;
+		rsrc_data->height = 0;
+		rsrc_data->stride = 1;
+		rsrc_data->en_cfg = 0x3;
+	} else if (rsrc_data->index == 11 || rsrc_data->index == 21) {
+		/* WM 21/11 PDAF/2PD */
+		rsrc_data->width = 0;
+		rsrc_data->height = 0;
+		rsrc_data->stride = 1;
+		rsrc_data->en_cfg = 0x3;
+		if (vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_PDAF)
+			/* LSB aligned */
+			rsrc_data->pack_fmt |= 0x10;
+	} else if (rsrc_data->index == 10) {
+		/* WM 10 Raw dump */
+		rsrc_data->width = rsrc_data->width * 2;
+		rsrc_data->stride = rsrc_data->width;
+		rsrc_data->en_cfg = 0x1;
+		/* LSB aligned */
+		rsrc_data->pack_fmt |= 0x10;
+	} else if (rsrc_data->index == 22) {
+		switch (rsrc_data->format) {
+		case CAM_FORMAT_PLAIN16_16:
+			rsrc_data->width = 0;
+			rsrc_data->height = 0;
+			rsrc_data->stride = 1;
+			rsrc_data->en_cfg = 0x3;
+			/* LSB aligned */
+			rsrc_data->pack_fmt |= 0x10;
+			break;
+		default:
+			CAM_ERR(CAM_ISP, "Invalid format %d",
+				rsrc_data->format);
+			return -EINVAL;
+		}
+	} else {
+		/* Write master 2-3 and 6-7 DS ports */
+		uint32_t align_width;
+
+		rsrc_data->width = rsrc_data->width * 4;
+		rsrc_data->height = rsrc_data->height / 2;
+		rsrc_data->en_cfg = 0x1;
+		CAM_DBG(CAM_ISP, "before width %d", rsrc_data->width);
+		align_width = ALIGNUP(rsrc_data->width, 16);
+		if (align_width != rsrc_data->width) {
+			CAM_WARN(CAM_ISP,
+				"override width %u with expected %u",
+				rsrc_data->width, align_width);
+			rsrc_data->width = align_width;
+		}
+	}
+
+	*wm_res = wm_res_local;
+	*comp_grp_id = rsrc_data->hw_regs->comp_group;
+
+	CAM_DBG(CAM_ISP, "WM:%d processed width %d, processed height %d",
+		rsrc_data->index, rsrc_data->width, rsrc_data->height);
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_release_wm(void   *bus_priv,
+	struct cam_isp_resource_node     *wm_res)
+{
+	struct cam_vfe_bus_ver3_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+
+	rsrc_data->offset = 0;
+	rsrc_data->width = 0;
+	rsrc_data->height = 0;
+	rsrc_data->stride = 0;
+	rsrc_data->format = 0;
+	rsrc_data->pack_fmt = 0;
+	rsrc_data->burst_len = 0;
+	rsrc_data->irq_subsample_period = 0;
+	rsrc_data->irq_subsample_pattern = 0;
+	rsrc_data->framedrop_period = 0;
+	rsrc_data->framedrop_pattern = 0;
+	rsrc_data->packer_cfg = 0;
+	rsrc_data->en_ubwc = 0;
+	rsrc_data->h_init = 0;
+	rsrc_data->ubwc_meta_addr = 0;
+	rsrc_data->ubwc_meta_cfg = 0;
+	rsrc_data->ubwc_mode_cfg = 0;
+	rsrc_data->ubwc_stats_ctrl = 0;
+	rsrc_data->ubwc_ctrl_2 = 0;
+	rsrc_data->init_cfg_done = false;
+	rsrc_data->hfr_cfg_done = false;
+	rsrc_data->ubwc_updated = false;
+	rsrc_data->en_cfg = 0;
+	rsrc_data->is_dual = 0;
+
+	wm_res->tasklet_info = NULL;
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	CAM_DBG(CAM_ISP, "Release WM:%d", rsrc_data->index);
+
+	return 0;
+}
+
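+/*
+ * cam_vfe_bus_ver3_start_wm()
+ *
+ * Program the reserved WM: burst limit, image dimensions, packer format,
+ * stride for RDI clients, optional UBWC mode, and finally the enable
+ * config register that arms the client.
+ */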
+static int cam_vfe_bus_ver3_start_wm(struct cam_isp_resource_node *wm_res)
+{
+	int val = 0;
+	struct cam_vfe_bus_ver3_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+	struct cam_vfe_bus_ver3_common_data        *common_data =
+		rsrc_data->common_data;
+	struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs;
+
+	ubwc_regs = (struct cam_vfe_bus_ver3_reg_offset_ubwc_client *)
+		rsrc_data->hw_regs->ubwc_regs;
+
+	cam_io_w(0xf, common_data->mem_base + rsrc_data->hw_regs->burst_limit);
+
+	cam_io_w((rsrc_data->height << 16) | rsrc_data->width,
+		common_data->mem_base + rsrc_data->hw_regs->image_cfg_0);
+	cam_io_w(rsrc_data->pack_fmt,
+		common_data->mem_base + rsrc_data->hw_regs->packer_cfg);
+
+	/* Configure stride for RDIs on full IFE */
+	if (wm_res->hw_intf->hw_idx < 2 && rsrc_data->index > 22)
+		cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
+			rsrc_data->hw_regs->image_cfg_2));
+
+	/* Configure stride for RDIs on IFE lite */
+	if (wm_res->hw_intf->hw_idx >= 2)
+		cam_io_w_mb(rsrc_data->stride, (common_data->mem_base +
+			rsrc_data->hw_regs->image_cfg_2));
+
+	/* Enable UBWC if needed */
+	if (rsrc_data->en_ubwc) {
+		val = cam_io_r_mb(common_data->mem_base + ubwc_regs->mode_cfg);
+		val |= 0x1;
+		cam_io_w_mb(val, common_data->mem_base + ubwc_regs->mode_cfg);
+	}
+
+	/* Enable WM */
+	cam_io_w_mb(rsrc_data->en_cfg, common_data->mem_base +
+		rsrc_data->hw_regs->cfg);
+
+	CAM_DBG(CAM_ISP, "WM:%d width = %d, height = %d", rsrc_data->index,
+		rsrc_data->width, rsrc_data->height);
+	CAM_DBG(CAM_ISP, "WM:%d pk_fmt = %d", rsrc_data->index,
+		rsrc_data->pack_fmt & PACKER_FMT_VER3_MAX);
+	CAM_DBG(CAM_ISP, "WM:%d stride = %d, burst len = %d",
+		rsrc_data->index, rsrc_data->stride, 0xf);
+	CAM_DBG(CAM_ISP, "Start WM:%d offset 0x%x val 0x%x",
+		rsrc_data->index, (uint32_t) rsrc_data->hw_regs->cfg,
+		rsrc_data->en_cfg);
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_stop_wm(struct cam_isp_resource_node *wm_res)
+{
+	struct cam_vfe_bus_ver3_wm_resource_data   *rsrc_data =
+		wm_res->res_priv;
+	struct cam_vfe_bus_ver3_common_data        *common_data =
+		rsrc_data->common_data;
+
+	/* Disable WM */
+	cam_io_w_mb(0x0, common_data->mem_base + rsrc_data->hw_regs->cfg);
+	CAM_DBG(CAM_ISP, "Stop WM:%d", rsrc_data->index);
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	rsrc_data->init_cfg_done = false;
+	rsrc_data->hfr_cfg_done = false;
+	rsrc_data->ubwc_updated = false;
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_handle_wm_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+static int cam_vfe_bus_ver3_handle_wm_done_bottom_half(void *wm_node,
+	void *evt_payload_priv)
+{
+	return -EPERM;
+}
+
+static int cam_vfe_bus_ver3_init_wm_resource(uint32_t index,
+	struct cam_vfe_bus_ver3_priv    *ver3_bus_priv,
+	struct cam_vfe_bus_ver3_hw_info *ver3_hw_info,
+	struct cam_isp_resource_node    *wm_res)
+{
+	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data;
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver3_wm_resource_data),
+		GFP_KERNEL);
+	if (!rsrc_data) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for WM res priv");
+		return -ENOMEM;
+	}
+	wm_res->res_priv = rsrc_data;
+
+	rsrc_data->index = index;
+	rsrc_data->hw_regs = &ver3_hw_info->bus_client_reg[index];
+	rsrc_data->common_data = &ver3_bus_priv->common_data;
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&wm_res->list);
+
+	wm_res->start = cam_vfe_bus_ver3_start_wm;
+	wm_res->stop = cam_vfe_bus_ver3_stop_wm;
+	wm_res->top_half_handler = cam_vfe_bus_ver3_handle_wm_done_top_half;
+	wm_res->bottom_half_handler =
+		cam_vfe_bus_ver3_handle_wm_done_bottom_half;
+	wm_res->hw_intf = ver3_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_deinit_wm_resource(
+	struct cam_isp_resource_node    *wm_res)
+{
+	struct cam_vfe_bus_ver3_wm_resource_data *rsrc_data;
+
+	wm_res->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	INIT_LIST_HEAD(&wm_res->list);
+
+	wm_res->start = NULL;
+	wm_res->stop = NULL;
+	wm_res->top_half_handler = NULL;
+	wm_res->bottom_half_handler = NULL;
+	wm_res->hw_intf = NULL;
+
+	rsrc_data = wm_res->res_priv;
+	wm_res->res_priv = NULL;
+	if (!rsrc_data)
+		return -ENOMEM;
+	kfree(rsrc_data);
+
+	return 0;
+}
+
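+/*
+ * cam_vfe_bus_ver3_match_comp_grp()
+ *
+ * Look up a composite group by type. Returns true if the group is already
+ * on the used list (previously acquired); otherwise a matching entry is
+ * moved from the free list to the used list and false is returned.
+ */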
+static bool cam_vfe_bus_ver3_match_comp_grp(
+	struct cam_vfe_bus_ver3_priv           *ver3_bus_priv,
+	struct cam_isp_resource_node          **comp_grp,
+	uint32_t                                comp_grp_id)
+{
+	struct cam_vfe_bus_ver3_comp_grp_data  *rsrc_data = NULL;
+	struct cam_isp_resource_node           *comp_grp_local = NULL;
+
+	list_for_each_entry(comp_grp_local,
+		&ver3_bus_priv->used_comp_grp, list) {
+		rsrc_data = comp_grp_local->res_priv;
+		if (rsrc_data->comp_grp_type == comp_grp_id) {
+			/* Match found */
+			*comp_grp = comp_grp_local;
+			return true;
+		}
+	}
+
+	list_for_each_entry(comp_grp_local,
+		&ver3_bus_priv->free_comp_grp, list) {
+		rsrc_data = comp_grp_local->res_priv;
+		if (rsrc_data->comp_grp_type == comp_grp_id) {
+			/* Match found */
+			*comp_grp = comp_grp_local;
+			list_del(&comp_grp_local->list);
+			list_add_tail(&comp_grp_local->list,
+			&ver3_bus_priv->used_comp_grp);
+			return false;
+		}
+	}
+
+	*comp_grp = NULL;
+	return false;
+}
+
+static int cam_vfe_bus_ver3_acquire_comp_grp(
+	struct cam_vfe_bus_ver3_priv        *ver3_bus_priv,
+	struct cam_isp_out_port_info        *out_port_info,
+	void                                *tasklet,
+	void                                *ctx,
+	uint32_t                             is_dual,
+	uint32_t                             is_master,
+	enum cam_vfe_bus_ver3_vfe_core_id    dual_slave_core,
+	struct cam_isp_resource_node       **comp_grp,
+	enum cam_vfe_bus_ver3_comp_grp_type  comp_grp_id)
+{
+	int rc = 0;
+	struct cam_isp_resource_node           *comp_grp_local = NULL;
+	struct cam_vfe_bus_ver3_comp_grp_data  *rsrc_data = NULL;
+	bool previously_acquired = false;
+
+	if (comp_grp_id >= CAM_VFE_BUS_VER3_COMP_GRP_0 &&
+		comp_grp_id <= CAM_VFE_BUS_VER3_COMP_GRP_13) {
+		/* Check if matching comp_grp has already been acquired */
+		previously_acquired = cam_vfe_bus_ver3_match_comp_grp(
+			ver3_bus_priv, &comp_grp_local, comp_grp_id);
+	}
+
+	if (!comp_grp_local) {
+		CAM_ERR(CAM_ISP, "Invalid comp_grp_type:%d", comp_grp_id);
+		return -ENODEV;
+	}
+
+	rsrc_data = comp_grp_local->res_priv;
+
+	if (!previously_acquired) {
+		if (is_dual) {
+			rc = cam_vfe_bus_ver3_get_intra_client_mask(
+				dual_slave_core,
+				comp_grp_local->hw_intf->hw_idx,
+				&rsrc_data->intra_client_mask);
+			if (rc)
+				return rc;
+		}
+
+		comp_grp_local->tasklet_info = tasklet;
+		comp_grp_local->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+		rsrc_data->is_master = is_master;
+		rsrc_data->is_dual = is_dual;
+
+		if (is_master)
+			rsrc_data->addr_sync_mode = 0;
+		else
+			rsrc_data->addr_sync_mode = 1;
+
+	} else {
+		rsrc_data = comp_grp_local->res_priv;
+		/* Do not support runtime change in composite mask */
+		if (comp_grp_local->res_state ==
+			CAM_ISP_RESOURCE_STATE_STREAMING) {
+			CAM_ERR(CAM_ISP, "Invalid State %d Comp Grp %u",
+				comp_grp_local->res_state,
+				rsrc_data->comp_grp_type);
+			return -EBUSY;
+		}
+	}
+
+	CAM_DBG(CAM_ISP, "Acquire comp_grp:%u", rsrc_data->comp_grp_type);
+
+	rsrc_data->ctx = ctx;
+	rsrc_data->acquire_dev_cnt++;
+	*comp_grp = comp_grp_local;
+
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_release_comp_grp(
+	struct cam_vfe_bus_ver3_priv         *ver3_bus_priv,
+	struct cam_isp_resource_node         *in_comp_grp)
+{
+	struct cam_isp_resource_node           *comp_grp = NULL;
+	struct cam_vfe_bus_ver3_comp_grp_data  *in_rsrc_data = NULL;
+	int match_found = 0;
+
+	if (!in_comp_grp) {
+		CAM_ERR(CAM_ISP, "Invalid Params Comp Grp %pK", in_comp_grp);
+		return -EINVAL;
+	}
+
+	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "Already released Comp Grp");
+		return 0;
+	}
+
+	if (in_comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING) {
+		CAM_ERR(CAM_ISP, "Invalid State %d",
+			in_comp_grp->res_state);
+		return -EBUSY;
+	}
+
+	in_rsrc_data = in_comp_grp->res_priv;
+	CAM_DBG(CAM_ISP, "Comp Grp type %u", in_rsrc_data->comp_grp_type);
+
+	list_for_each_entry(comp_grp, &ver3_bus_priv->used_comp_grp, list) {
+		if (comp_grp == in_comp_grp) {
+			match_found = 1;
+			break;
+		}
+	}
+
+	if (!match_found) {
+		CAM_ERR(CAM_ISP, "Could not find comp_grp_type:%u",
+			in_rsrc_data->comp_grp_type);
+		return -ENODEV;
+	}
+
+	in_rsrc_data->acquire_dev_cnt--;
+	if (in_rsrc_data->acquire_dev_cnt == 0) {
+		list_del(&comp_grp->list);
+
+		in_rsrc_data->dual_slave_core = CAM_VFE_BUS_VER3_VFE_CORE_MAX;
+		in_rsrc_data->addr_sync_mode = 0;
+
+		comp_grp->tasklet_info = NULL;
+		comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+		list_add_tail(&comp_grp->list, &ver3_bus_priv->free_comp_grp);
+	}
+
+	return 0;
+}
+
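+/*
+ * cam_vfe_bus_ver3_start_comp_grp()
+ *
+ * Configure dual-IFE composite sync in comp_cfg_0/1 when needed and
+ * subscribe the buf done IRQ for this composite group. In dual mode only
+ * the master subscribes; the slave relies on the master's IRQ.
+ */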
+static int cam_vfe_bus_ver3_start_comp_grp(
+	struct cam_isp_resource_node *comp_grp)
+{
+	int rc = 0;
+	uint32_t val;
+	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
+	struct cam_vfe_bus_ver3_common_data *common_data = NULL;
+	uint32_t bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_MAX] = {0};
+
+	rsrc_data = comp_grp->res_priv;
+	common_data = rsrc_data->common_data;
+
+	CAM_DBG(CAM_ISP, "comp_grp_type:%d streaming state:%d",
+		rsrc_data->comp_grp_type, comp_grp->res_state);
+
+	if (comp_grp->res_state == CAM_ISP_RESOURCE_STATE_STREAMING)
+		return 0;
+
+	if (rsrc_data->is_dual) {
+		if (rsrc_data->is_master) {
+			val = cam_io_r_mb(common_data->mem_base +
+				common_data->common_reg->comp_cfg_0);
+			val |= (0x1 << (rsrc_data->comp_grp_type + 14));
+			cam_io_w_mb(val, common_data->mem_base +
+				common_data->common_reg->comp_cfg_0);
+
+			val = cam_io_r_mb(common_data->mem_base +
+				common_data->common_reg->comp_cfg_1);
+			val |= (0x1 << rsrc_data->comp_grp_type);
+			cam_io_w_mb(val, common_data->mem_base +
+				common_data->common_reg->comp_cfg_1);
+		} else {
+			val = cam_io_r_mb(common_data->mem_base +
+				common_data->common_reg->comp_cfg_0);
+			val |= (0x1 << rsrc_data->comp_grp_type);
+			cam_io_w_mb(val, common_data->mem_base +
+				common_data->common_reg->comp_cfg_0);
+
+			val = cam_io_r_mb(common_data->mem_base +
+				common_data->common_reg->comp_cfg_1);
+			val |= (0x1 << rsrc_data->comp_grp_type);
+			cam_io_w_mb(val, common_data->mem_base +
+				common_data->common_reg->comp_cfg_1);
+		}
+	}
+
+	bus_irq_reg_mask[CAM_VFE_BUS_VER3_IRQ_REG0] =
+		(0x1 << (rsrc_data->comp_grp_type + 6));
+
+	/*
+	 * For Dual composite subscribe IRQ only for master
+	 * For regular composite, subscribe IRQ always
+	 */
+	CAM_DBG(CAM_ISP, "Subscribe comp_grp_type:%d IRQ",
+		rsrc_data->comp_grp_type);
+	if ((rsrc_data->is_dual && rsrc_data->is_master) ||
+		(!rsrc_data->is_dual)) {
+		comp_grp->irq_handle = cam_irq_controller_subscribe_irq(
+			common_data->bus_irq_controller, CAM_IRQ_PRIORITY_1,
+			bus_irq_reg_mask, comp_grp,
+			comp_grp->top_half_handler,
+			cam_ife_mgr_do_tasklet_buf_done,
+			comp_grp->tasklet_info, &tasklet_bh_api);
+		if (comp_grp->irq_handle < 0) {
+			CAM_ERR(CAM_ISP, "Subscribe IRQ failed for comp_grp %d",
+				rsrc_data->comp_grp_type);
+			return -EFAULT;
+		}
+	}
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_stop_comp_grp(
+	struct cam_isp_resource_node          *comp_grp)
+{
+	int rc = 0;
+	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
+	struct cam_vfe_bus_ver3_common_data   *common_data = NULL;
+
+	rsrc_data = comp_grp->res_priv;
+	common_data = rsrc_data->common_data;
+
+	/* Unsubscribe IRQ */
+	if ((rsrc_data->is_dual && rsrc_data->is_master) ||
+		(!rsrc_data->is_dual)) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			common_data->bus_irq_controller,
+			comp_grp->irq_handle);
+	}
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_handle_comp_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int32_t                                     rc;
+	int                                         i;
+	struct cam_isp_resource_node               *comp_grp = NULL;
+	struct cam_vfe_bus_ver3_comp_grp_data      *rsrc_data = NULL;
+	struct cam_vfe_bus_irq_evt_payload         *evt_payload;
+
+	comp_grp = th_payload->handler_priv;
+	if (!comp_grp) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "No resource");
+		return -ENODEV;
+	}
+
+	rsrc_data = comp_grp->res_priv;
+
+	CAM_DBG(CAM_ISP, "IRQ status_0 = 0x%x", th_payload->evt_status_arr[0]);
+	CAM_DBG(CAM_ISP, "IRQ status_1 = 0x%x", th_payload->evt_status_arr[1]);
+
+	rc  = cam_vfe_bus_ver3_get_evt_payload(rsrc_data->common_data,
+		&evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"No tasklet_cmd is free in queue");
+		CAM_ERR_RATE_LIMIT(CAM_ISP,
+			"IRQ status_0 = 0x%x status_1 = 0x%x",
+			th_payload->evt_status_arr[0],
+			th_payload->evt_status_arr[1]);
+
+		return rc;
+	}
+
+	cam_isp_hw_get_timestamp(&evt_payload->ts);
+
+	evt_payload->ctx = rsrc_data->ctx;
+	evt_payload->core_index = rsrc_data->common_data->core_index;
+	evt_payload->evt_id  = evt_id;
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_handle_comp_done_bottom_half(
+	void                *handler_priv,
+	void                *evt_payload_priv)
+{
+	int rc = CAM_VFE_IRQ_STATUS_ERR;
+	struct cam_isp_resource_node          *comp_grp = handler_priv;
+	struct cam_vfe_bus_irq_evt_payload    *evt_payload = evt_payload_priv;
+	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = comp_grp->res_priv;
+	uint32_t                              *cam_ife_irq_regs;
+	uint32_t                               status0_reg;
+
+	CAM_DBG(CAM_ISP, "comp_grp_type:%d", rsrc_data->comp_grp_type);
+
+	if (!evt_payload)
+		return rc;
+
+	if (rsrc_data->is_dual && (!rsrc_data->is_master)) {
+		CAM_ERR(CAM_ISP, "Invalid comp_grp_type:%u is_master:%u",
+			rsrc_data->comp_grp_type, rsrc_data->is_master);
+		return rc;
+	}
+
+	cam_ife_irq_regs = evt_payload->irq_reg_val;
+	status0_reg = cam_ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
+
+	if (status0_reg & BIT(rsrc_data->comp_grp_type + 6)) {
+		rsrc_data->irq_trigger_cnt++;
+		if (rsrc_data->irq_trigger_cnt ==
+			rsrc_data->acquire_dev_cnt) {
+			cam_ife_irq_regs[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0] &=
+					~BIT(rsrc_data->comp_grp_type + 6);
+			rsrc_data->irq_trigger_cnt = 0;
+		}
+		rc = CAM_VFE_IRQ_STATUS_SUCCESS;
+	}
+
+	CAM_DBG(CAM_ISP, "status_0_reg = 0x%x, bit index = %d rc %d",
+		status0_reg, (rsrc_data->comp_grp_type + 6), rc);
+
+	if (rc == CAM_VFE_IRQ_STATUS_SUCCESS)
+		cam_vfe_bus_ver3_put_evt_payload(rsrc_data->common_data,
+			&evt_payload);
+
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_init_comp_grp(uint32_t index,
+	struct cam_vfe_bus_ver3_priv    *ver3_bus_priv,
+	struct cam_vfe_bus_ver3_hw_info *ver3_hw_info,
+	struct cam_isp_resource_node    *comp_grp)
+{
+	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data = NULL;
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver3_comp_grp_data),
+		GFP_KERNEL);
+	if (!rsrc_data)
+		return -ENOMEM;
+
+	comp_grp->res_priv = rsrc_data;
+
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&comp_grp->list);
+
+	rsrc_data->comp_grp_type   = index;
+	rsrc_data->common_data     = &ver3_bus_priv->common_data;
+	rsrc_data->dual_slave_core = CAM_VFE_BUS_VER3_VFE_CORE_MAX;
+
+	list_add_tail(&comp_grp->list, &ver3_bus_priv->free_comp_grp);
+
+	comp_grp->start = cam_vfe_bus_ver3_start_comp_grp;
+	comp_grp->stop = cam_vfe_bus_ver3_stop_comp_grp;
+	comp_grp->top_half_handler = cam_vfe_bus_ver3_handle_comp_done_top_half;
+	comp_grp->bottom_half_handler =
+		cam_vfe_bus_ver3_handle_comp_done_bottom_half;
+	comp_grp->hw_intf = ver3_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_deinit_comp_grp(
+	struct cam_isp_resource_node    *comp_grp)
+{
+	struct cam_vfe_bus_ver3_comp_grp_data *rsrc_data =
+		comp_grp->res_priv;
+
+	comp_grp->start = NULL;
+	comp_grp->stop = NULL;
+	comp_grp->top_half_handler = NULL;
+	comp_grp->bottom_half_handler = NULL;
+	comp_grp->hw_intf = NULL;
+
+	list_del_init(&comp_grp->list);
+	comp_grp->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+
+	comp_grp->res_priv = NULL;
+
+	if (!rsrc_data) {
+		CAM_ERR(CAM_ISP, "comp_grp_priv is NULL");
+		return -ENODEV;
+	}
+	kfree(rsrc_data);
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_get_secure_mode(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	bool *mode = cmd_args;
+	struct cam_isp_resource_node *res =
+		(struct cam_isp_resource_node *) priv;
+	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data =
+		(struct cam_vfe_bus_ver3_vfe_out_data *)res->res_priv;
+
+	*mode = (rsrc_data->secure_mode == CAM_SECURE_MODE_SECURE) ?
+		true : false;
+
+	return 0;
+}
+
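+/*
+ * cam_vfe_bus_ver3_acquire_vfe_out()
+ *
+ * Resolve the requested output port to a VFE out resource, validate the
+ * secure mode against previously acquired outputs, acquire one WM per
+ * plane and then the composite group reported by the WM register map.
+ */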
+static int cam_vfe_bus_ver3_acquire_vfe_out(void *bus_priv, void *acquire_args,
+	uint32_t args_size)
+{
+	int                                     rc = -ENODEV;
+	int                                     i;
+	enum cam_vfe_bus_ver3_vfe_out_type      vfe_out_res_id;
+	uint32_t                                format;
+	int                                     num_wm;
+	struct cam_vfe_bus_ver3_priv           *ver3_bus_priv = bus_priv;
+	struct cam_vfe_acquire_args            *acq_args = acquire_args;
+	struct cam_vfe_hw_vfe_out_acquire_args *out_acquire_args;
+	struct cam_isp_resource_node           *rsrc_node = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data   *rsrc_data = NULL;
+	uint32_t                                secure_caps = 0, mode;
+	enum cam_vfe_bus_ver3_comp_grp_type     comp_grp_id;
+
+	if (!bus_priv || !acquire_args) {
+		CAM_ERR(CAM_ISP, "Invalid Param");
+		return -EINVAL;
+	}
+
+	out_acquire_args = &acq_args->vfe_out;
+	format = out_acquire_args->out_port_info->format;
+
+	CAM_DBG(CAM_ISP, "Acquiring resource type 0x%x",
+		out_acquire_args->out_port_info->res_type);
+
+	vfe_out_res_id = cam_vfe_bus_ver3_get_out_res_id(
+		out_acquire_args->out_port_info->res_type);
+	if (vfe_out_res_id == CAM_VFE_BUS_VER3_VFE_OUT_MAX)
+		return -ENODEV;
+
+	num_wm = cam_vfe_bus_ver3_get_num_wm(vfe_out_res_id, format);
+	if (num_wm < 1)
+		return -EINVAL;
+
+	rsrc_node = &ver3_bus_priv->vfe_out[vfe_out_res_id];
+	if (rsrc_node->res_state != CAM_ISP_RESOURCE_STATE_AVAILABLE) {
+		CAM_ERR(CAM_ISP, "Resource not available: Res_id %d state:%d",
+			vfe_out_res_id, rsrc_node->res_state);
+		return -EBUSY;
+	}
+
+	rsrc_data = rsrc_node->res_priv;
+	secure_caps = cam_vfe_bus_ver3_can_be_secure(
+		rsrc_data->out_type);
+	mode = out_acquire_args->out_port_info->secure_mode;
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (!rsrc_data->common_data->num_sec_out) {
+			rsrc_data->secure_mode = mode;
+			rsrc_data->common_data->secure_mode = mode;
+		} else {
+			if (mode == rsrc_data->common_data->secure_mode) {
+				rsrc_data->secure_mode =
+					rsrc_data->common_data->secure_mode;
+			} else {
+				rc = -EINVAL;
+				CAM_ERR_RATE_LIMIT(CAM_ISP,
+					"Mismatch: Acquire mode[%d], drvr mode[%d]",
+					mode,
+					rsrc_data->common_data->secure_mode);
+				mutex_unlock(
+					&rsrc_data->common_data->bus_mutex);
+				return -EINVAL;
+			}
+		}
+		rsrc_data->common_data->num_sec_out++;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
+	ver3_bus_priv->tasklet_info = acq_args->tasklet;
+	rsrc_data->num_wm = num_wm;
+	rsrc_node->res_id = out_acquire_args->out_port_info->res_type;
+	rsrc_node->tasklet_info = acq_args->tasklet;
+	rsrc_node->cdm_ops = out_acquire_args->cdm_ops;
+	rsrc_data->cdm_util_ops = out_acquire_args->cdm_ops;
+
+	/* Acquire WM and retrieve COMP GRP ID */
+	for (i = 0; i < num_wm; i++) {
+		rc = cam_vfe_bus_ver3_acquire_wm(ver3_bus_priv,
+			out_acquire_args->out_port_info,
+			acq_args->tasklet,
+			out_acquire_args->ctx,
+			vfe_out_res_id,
+			i,
+			&rsrc_data->wm_res[i],
+			out_acquire_args->is_dual,
+			&comp_grp_id,
+			rsrc_node->hw_intf->hw_idx);
+		if (rc) {
+			CAM_ERR(CAM_ISP,
+				"VFE%d WM acquire failed for Out %d rc=%d",
+				rsrc_data->common_data->core_index,
+				vfe_out_res_id, rc);
+			goto release_wm;
+		}
+	}
+
+	/* Acquire composite group using COMP GRP ID */
+	rc = cam_vfe_bus_ver3_acquire_comp_grp(ver3_bus_priv,
+		out_acquire_args->out_port_info,
+		acq_args->tasklet,
+		out_acquire_args->ctx,
+		out_acquire_args->is_dual,
+		out_acquire_args->is_master,
+		out_acquire_args->dual_slave_core,
+		&rsrc_data->comp_grp,
+		comp_grp_id);
+	if (rc) {
+		CAM_ERR(CAM_ISP,
+			"VFE%d Comp_Grp acquire fail for Out %d rc=%d",
+			rsrc_data->common_data->core_index,
+			vfe_out_res_id, rc);
+		return rc;
+	}
+
+	rsrc_node->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	out_acquire_args->rsrc_node = rsrc_node;
+
+	CAM_DBG(CAM_ISP, "Acquire successful");
+	return rc;
+
+release_wm:
+	for (i--; i >= 0; i--)
+		cam_vfe_bus_ver3_release_wm(ver3_bus_priv,
+			rsrc_data->wm_res[i]);
+
+	cam_vfe_bus_ver3_release_comp_grp(ver3_bus_priv, rsrc_data->comp_grp);
+
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_release_vfe_out(void *bus_priv, void *release_args,
+	uint32_t args_size)
+{
+	uint32_t i;
+	struct cam_isp_resource_node          *vfe_out = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data  *rsrc_data = NULL;
+	uint32_t                               secure_caps = 0;
+
+	if (!bus_priv || !release_args) {
+		CAM_ERR(CAM_ISP, "Invalid input bus_priv %pK release_args %pK",
+			bus_priv, release_args);
+		return -EINVAL;
+	}
+
+	vfe_out = release_args;
+	rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d",
+			vfe_out->res_state);
+	}
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		cam_vfe_bus_ver3_release_wm(bus_priv, rsrc_data->wm_res[i]);
+	rsrc_data->num_wm = 0;
+
+	if (rsrc_data->comp_grp)
+		cam_vfe_bus_ver3_release_comp_grp(bus_priv,
+			rsrc_data->comp_grp);
+	rsrc_data->comp_grp = NULL;
+
+	vfe_out->tasklet_info = NULL;
+	vfe_out->cdm_ops = NULL;
+	rsrc_data->cdm_util_ops = NULL;
+
+	secure_caps = cam_vfe_bus_ver3_can_be_secure(rsrc_data->out_type);
+	mutex_lock(&rsrc_data->common_data->bus_mutex);
+	if (secure_caps) {
+		if (rsrc_data->secure_mode ==
+			rsrc_data->common_data->secure_mode) {
+			rsrc_data->common_data->num_sec_out--;
+			rsrc_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+		} else {
+			/*
+			 * The validity of the mode is properly
+			 * checked while acquiring the output port.
+			 * not expected to reach here, unless there is
+			 * some corruption.
+			 */
+			CAM_ERR(CAM_ISP, "driver[%d], resource[%d] mismatch",
+				rsrc_data->common_data->secure_mode,
+				rsrc_data->secure_mode);
+		}
+
+		if (!rsrc_data->common_data->num_sec_out)
+			rsrc_data->common_data->secure_mode =
+				CAM_SECURE_MODE_NON_SECURE;
+	}
+	mutex_unlock(&rsrc_data->common_data->bus_mutex);
+
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED)
+		vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_start_vfe_out(
+	struct cam_isp_resource_node          *vfe_out)
+{
+	int rc = 0, i;
+	struct cam_vfe_bus_ver3_vfe_out_data  *rsrc_data = NULL;
+	struct cam_vfe_bus_ver3_common_data   *common_data = NULL;
+
+	if (!vfe_out) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+
+	rsrc_data = vfe_out->res_priv;
+	common_data = rsrc_data->common_data;
+
+	CAM_DBG(CAM_ISP, "Start resource index %d", rsrc_data->out_type);
+
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_ERR(CAM_ISP, "Invalid resource state:%d",
+			vfe_out->res_state);
+		return -EACCES;
+	}
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		rc = cam_vfe_bus_ver3_start_wm(rsrc_data->wm_res[i]);
+
+	if (rsrc_data->comp_grp)
+		rc = cam_vfe_bus_ver3_start_comp_grp(rsrc_data->comp_grp);
+
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_stop_vfe_out(
+	struct cam_isp_resource_node          *vfe_out)
+{
+	int rc = 0, i;
+	struct cam_vfe_bus_ver3_vfe_out_data  *rsrc_data = NULL;
+
+	if (!vfe_out) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+
+	rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_AVAILABLE ||
+		vfe_out->res_state == CAM_ISP_RESOURCE_STATE_RESERVED) {
+		CAM_DBG(CAM_ISP, "vfe_out res_state is %d", vfe_out->res_state);
+		return rc;
+	}
+
+	if (rsrc_data->comp_grp)
+		rc = cam_vfe_bus_ver3_stop_comp_grp(rsrc_data->comp_grp);
+
+	for (i = 0; i < rsrc_data->num_wm; i++)
+		rc = cam_vfe_bus_ver3_stop_wm(rsrc_data->wm_res[i]);
+
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_RESERVED;
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_handle_vfe_out_done_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	return -EPERM;
+}
+
+static int cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half(
+	void                *handler_priv,
+	void                *evt_payload_priv)
+{
+	int rc = -EINVAL;
+	struct cam_isp_resource_node         *vfe_out = handler_priv;
+	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+	/* We only handle composite buf done */
+	if (rsrc_data->comp_grp) {
+		rc = rsrc_data->comp_grp->bottom_half_handler(
+			rsrc_data->comp_grp, evt_payload_priv);
+	}
+
+	CAM_DBG(CAM_ISP, "vfe_out %d rc %d", rsrc_data->out_type, rc);
+
+	return rc;
+}
+
+static int cam_vfe_bus_ver3_init_vfe_out_resource(uint32_t  index,
+	struct cam_vfe_bus_ver3_priv                  *ver3_bus_priv,
+	struct cam_vfe_bus_ver3_hw_info               *ver3_hw_info)
+{
+	struct cam_isp_resource_node         *vfe_out = NULL;
+	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = NULL;
+	int rc = 0;
+	int32_t vfe_out_type =
+		ver3_hw_info->vfe_out_hw_info[index].vfe_out_type;
+
+	if (vfe_out_type < 0 ||
+		vfe_out_type >= CAM_VFE_BUS_VER3_VFE_OUT_MAX) {
+		CAM_ERR(CAM_ISP, "Init VFE Out failed, Invalid type=%d",
+			vfe_out_type);
+		return -EINVAL;
+	}
+
+	vfe_out = &ver3_bus_priv->vfe_out[vfe_out_type];
+	if (vfe_out->res_state != CAM_ISP_RESOURCE_STATE_UNAVAILABLE ||
+		vfe_out->res_priv) {
+		CAM_ERR(CAM_ISP,
+			"vfe_out_type %d has already been initialized",
+			vfe_out_type);
+		return -EFAULT;
+	}
+
+	rsrc_data = kzalloc(sizeof(struct cam_vfe_bus_ver3_vfe_out_data),
+		GFP_KERNEL);
+	if (!rsrc_data) {
+		rc = -ENOMEM;
+		return rc;
+	}
+
+	vfe_out->res_priv = rsrc_data;
+
+	vfe_out->res_type = CAM_ISP_RESOURCE_VFE_OUT;
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_AVAILABLE;
+	INIT_LIST_HEAD(&vfe_out->list);
+
+	rsrc_data->out_type    =
+		ver3_hw_info->vfe_out_hw_info[index].vfe_out_type;
+	rsrc_data->common_data = &ver3_bus_priv->common_data;
+	rsrc_data->max_width   =
+		ver3_hw_info->vfe_out_hw_info[index].max_width;
+	rsrc_data->max_height  =
+		ver3_hw_info->vfe_out_hw_info[index].max_height;
+	rsrc_data->secure_mode = CAM_SECURE_MODE_NON_SECURE;
+
+	vfe_out->start = cam_vfe_bus_ver3_start_vfe_out;
+	vfe_out->stop = cam_vfe_bus_ver3_stop_vfe_out;
+	vfe_out->top_half_handler =
+		cam_vfe_bus_ver3_handle_vfe_out_done_top_half;
+	vfe_out->bottom_half_handler =
+		cam_vfe_bus_ver3_handle_vfe_out_done_bottom_half;
+	vfe_out->process_cmd = cam_vfe_bus_ver3_process_cmd;
+	vfe_out->hw_intf = ver3_bus_priv->common_data.hw_intf;
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_deinit_vfe_out_resource(
+	struct cam_isp_resource_node    *vfe_out)
+{
+	struct cam_vfe_bus_ver3_vfe_out_data *rsrc_data = vfe_out->res_priv;
+
+	if (vfe_out->res_state == CAM_ISP_RESOURCE_STATE_UNAVAILABLE) {
+		/*
+		 * This is not error. It can happen if the resource is
+		 * never supported in the HW.
+		 */
+		CAM_DBG(CAM_ISP, "Res %d already deinitialized",
+			vfe_out->res_id);
+		return 0;
+	}
+
+	vfe_out->start = NULL;
+	vfe_out->stop = NULL;
+	vfe_out->top_half_handler = NULL;
+	vfe_out->bottom_half_handler = NULL;
+	vfe_out->hw_intf = NULL;
+
+	vfe_out->res_state = CAM_ISP_RESOURCE_STATE_UNAVAILABLE;
+	INIT_LIST_HEAD(&vfe_out->list);
+	vfe_out->res_priv = NULL;
+
+	if (!rsrc_data)
+		return -ENOMEM;
+	kfree(rsrc_data);
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_handle_irq(uint32_t    evt_id,
+	struct cam_irq_th_payload                 *th_payload)
+{
+	struct cam_vfe_bus_ver3_priv          *bus_priv;
+	int rc = 0;
+
+	bus_priv     = th_payload->handler_priv;
+	CAM_DBG(CAM_ISP, "Enter");
+	rc = cam_irq_controller_handle_irq(evt_id,
+		bus_priv->common_data.bus_irq_controller);
+	return (rc == IRQ_HANDLED) ? 0 : -EINVAL;
+}
+
+static int cam_vfe_bus_ver3_err_irq_top_half(uint32_t evt_id,
+	struct cam_irq_th_payload *th_payload)
+{
+	int i = 0, rc = 0;
+	struct cam_vfe_bus_ver3_priv *bus_priv =
+		th_payload->handler_priv;
+	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+
+	CAM_ERR_RATE_LIMIT(CAM_ISP, "Bus Err IRQ");
+	for (i = 0; i < th_payload->num_registers; i++) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "vfe:%d: IRQ_Status%d: 0x%x",
+			bus_priv->common_data.core_index, i,
+			th_payload->evt_status_arr[i]);
+	}
+	cam_irq_controller_disable_irq(bus_priv->common_data.bus_irq_controller,
+		bus_priv->error_irq_handle);
+
+	rc  = cam_vfe_bus_ver3_get_evt_payload(&bus_priv->common_data,
+		&evt_payload);
+	if (rc) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Cannot get payload");
+		return rc;
+	}
+
+	for (i = 0; i < th_payload->num_registers; i++)
+		evt_payload->irq_reg_val[i] = th_payload->evt_status_arr[i];
+
+	evt_payload->core_index = bus_priv->common_data.core_index;
+	evt_payload->evt_id  = evt_id;
+
+	evt_payload->ctx = &bus_priv->common_data;
+	evt_payload->ccif_violation_status = cam_io_r_mb(
+		bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->ccif_violation_status);
+
+	evt_payload->overflow_status = cam_io_r_mb(
+		bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->overflow_status);
+	cam_io_w_mb(0x1, bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->overflow_status_clear);
+
+	evt_payload->image_size_violation_status = cam_io_r_mb(
+		bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->image_size_violation_status);
+
+	th_payload->evt_payload_priv = evt_payload;
+
+	return rc;
+}
+
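+/*
+ * cam_vfe_bus_ver3_err_irq_bottom_half()
+ *
+ * Decode the bus violation: bit 31 of STATUS0 flags an image size
+ * violation and bit 30 a CCIF violation; the per-client status registers
+ * captured in the top half identify the offending output.
+ */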
+static int cam_vfe_bus_ver3_err_irq_bottom_half(void *ctx_priv,
+	void *evt_payload_priv)
+{
+	struct cam_vfe_bus_irq_evt_payload *evt_payload;
+	struct cam_vfe_bus_ver3_common_data *common_data;
+	uint32_t val = 0, image_size_violation = 0, ccif_violation = 0;
+
+	if (!ctx_priv || !evt_payload_priv)
+		return -EINVAL;
+
+	evt_payload = evt_payload_priv;
+	common_data = evt_payload->ctx;
+
+	val = evt_payload->irq_reg_val[CAM_IFE_IRQ_BUS_VER3_REG_STATUS0];
+	image_size_violation = (val >> 31) & 0x1;
+	ccif_violation = (val >> 30) & 0x1;
+
+	CAM_ERR(CAM_ISP, "Bus Violation");
+	CAM_ERR(CAM_ISP, "image_size_violation %d ccif_violation %d",
+		image_size_violation, ccif_violation);
+
+	if (image_size_violation) {
+		val = evt_payload->image_size_violation_status;
+
+		if (val & 0x01)
+			CAM_INFO(CAM_ISP, "VID Y 1:1 image size violation");
+
+		if (val & 0x02)
+			CAM_INFO(CAM_ISP, "VID C 1:1 image size violation");
+
+		if (val & 0x04)
+			CAM_INFO(CAM_ISP, "VID YC 4:1 image size violation");
+
+		if (val & 0x08)
+			CAM_INFO(CAM_ISP, "VID YC 16:1 image size violation");
+
+		if (val & 0x010)
+			CAM_INFO(CAM_ISP, "DISP Y 1:1 image size violation");
+
+		if (val & 0x020)
+			CAM_INFO(CAM_ISP, "DISP C 1:1 image size violation");
+
+		if (val & 0x040)
+			CAM_INFO(CAM_ISP, "DISP YC 4:1 image size violation");
+
+		if (val & 0x080)
+			CAM_INFO(CAM_ISP, "DISP YC 16:1 image size violation");
+
+		if (val & 0x0100)
+			CAM_INFO(CAM_ISP, "FD Y image size violation");
+
+		if (val & 0x0200)
+			CAM_INFO(CAM_ISP, "FD C image size violation");
+
+		if (val & 0x0400)
+			CAM_INFO(CAM_ISP,
+			"PIXEL RAW DUMP image size violation");
+
+		if (val & 0x0800)
+			CAM_INFO(CAM_ISP, "CAMIF PD image size violation");
+
+		if (val & 0x01000)
+			CAM_INFO(CAM_ISP, "STATS HDR BE image size violation");
+
+		if (val & 0x02000)
+			CAM_INFO(CAM_ISP,
+			"STATS HDR BHIST image size violation");
+
+		if (val & 0x04000)
+			CAM_INFO(CAM_ISP,
+			"STATS TINTLESS BG image size violation");
+
+		if (val & 0x08000)
+			CAM_INFO(CAM_ISP, "STATS AWB BG image size violation");
+
+		if (val & 0x010000)
+			CAM_INFO(CAM_ISP, "STATS BHIST image size violation");
+
+		if (val & 0x020000)
+			CAM_INFO(CAM_ISP, "STATS RS image size violation");
+
+		if (val & 0x040000)
+			CAM_INFO(CAM_ISP, "STATS CS image size violation");
+
+		if (val & 0x080000)
+			CAM_INFO(CAM_ISP, "STATS IHIST image size violation");
+
+		if (val & 0x0100000)
+			CAM_INFO(CAM_ISP, "STATS BAF image size violation");
+
+		if (val & 0x0200000)
+			CAM_INFO(CAM_ISP, "PDAF image size violation");
+
+		if (val & 0x0400000)
+			CAM_INFO(CAM_ISP, "LCR image size violation");
+
+		if (val & 0x0800000)
+			CAM_INFO(CAM_ISP, "RDI 0 image size violation");
+
+		if (val & 0x01000000)
+			CAM_INFO(CAM_ISP, "RDI 1 image size violation");
+
+		if (val & 0x02000000)
+			CAM_INFO(CAM_ISP, "RDI 2 image size violation");
+
+	}
+
+	if (ccif_violation) {
+		val = evt_payload->ccif_violation_status;
+
+		if (val & 0x01)
+			CAM_INFO(CAM_ISP, "VID Y 1:1 ccif violation");
+
+		if (val & 0x02)
+			CAM_INFO(CAM_ISP, "VID C 1:1 ccif violation");
+
+		if (val & 0x04)
+			CAM_INFO(CAM_ISP, "VID YC 4:1 ccif violation");
+
+		if (val & 0x08)
+			CAM_INFO(CAM_ISP, "VID YC 16:1 ccif violation");
+
+		if (val & 0x010)
+			CAM_INFO(CAM_ISP, "DISP Y 1:1 ccif violation");
+
+		if (val & 0x020)
+			CAM_INFO(CAM_ISP, "DISP C 1:1 ccif violation");
+
+		if (val & 0x040)
+			CAM_INFO(CAM_ISP, "DISP YC 4:1 ccif violation");
+
+		if (val & 0x080)
+			CAM_INFO(CAM_ISP, "DISP YC 16:1 ccif violation");
+
+		if (val & 0x0100)
+			CAM_INFO(CAM_ISP, "FD Y ccif violation");
+
+		if (val & 0x0200)
+			CAM_INFO(CAM_ISP, "FD C ccif violation");
+
+		if (val & 0x0400)
+			CAM_INFO(CAM_ISP, "PIXEL RAW DUMP ccif violation");
+
+		if (val & 0x0800)
+			CAM_INFO(CAM_ISP, "CAMIF PD ccif violation");
+
+		if (val & 0x01000)
+			CAM_INFO(CAM_ISP, "STATS HDR BE ccif violation");
+
+		if (val & 0x02000)
+			CAM_INFO(CAM_ISP, "STATS HDR BHIST ccif violation");
+
+		if (val & 0x04000)
+			CAM_INFO(CAM_ISP, "STATS TINTLESS BG ccif violation");
+
+		if (val & 0x08000)
+			CAM_INFO(CAM_ISP, "STATS AWB BG ccif violation");
+
+		if (val & 0x010000)
+			CAM_INFO(CAM_ISP, "STATS BHIST ccif violation");
+
+		if (val & 0x020000)
+			CAM_INFO(CAM_ISP, "STATS RS ccif violation");
+
+		if (val & 0x040000)
+			CAM_INFO(CAM_ISP, "STATS CS ccif violation");
+
+		if (val & 0x080000)
+			CAM_INFO(CAM_ISP, "STATS IHIST ccif violation");
+
+		if (val & 0x0100000)
+			CAM_INFO(CAM_ISP, "STATS BAF ccif violation");
+
+		if (val & 0x0200000)
+			CAM_INFO(CAM_ISP, "PDAF ccif violation");
+
+		if (val & 0x0400000)
+			CAM_INFO(CAM_ISP, "LCR ccif violation");
+
+		if (val & 0x0800000)
+			CAM_INFO(CAM_ISP, "RDI 0 ccif violation");
+
+		if (val & 0x01000000)
+			CAM_INFO(CAM_ISP, "RDI 1 ccif violation");
+
+		if (val & 0x02000000)
+			CAM_INFO(CAM_ISP, "RDI 2 ccif violation");
+
+	}
+
+	cam_vfe_bus_ver3_put_evt_payload(common_data, &evt_payload);
+	return 0;
+}
+
+static void cam_vfe_bus_ver3_update_ubwc_meta_addr(
+	uint32_t *reg_val_pair,
+	uint32_t  *j,
+	void     *regs,
+	uint64_t  image_buf)
+{
+	struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs;
+
+	if (!regs || !reg_val_pair || !j) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		goto end;
+	}
+
+	ubwc_regs = (struct cam_vfe_bus_ver3_reg_offset_ubwc_client *)regs;
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->meta_addr, image_buf);
+
+end:
+	return;
+}
+
+static int cam_vfe_bus_ver3_update_ubwc_regs(
+	struct cam_vfe_bus_ver3_wm_resource_data *wm_data,
+	uint32_t *reg_val_pair, uint32_t i, uint32_t *j)
+{
+	struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_regs;
+	uint32_t ubwc_bw_limit = 0;
+	int rc = 0;
+
+	if (!wm_data || !reg_val_pair || !j) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	ubwc_regs = (struct cam_vfe_bus_ver3_reg_offset_ubwc_client *)
+		wm_data->hw_regs->ubwc_regs;
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		wm_data->hw_regs->packer_cfg, wm_data->packer_cfg);
+	CAM_DBG(CAM_ISP, "WM:%d packer cfg 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	if (wm_data->is_dual) {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			wm_data->hw_regs->image_cfg_1, wm_data->offset);
+	} else {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			wm_data->hw_regs->image_cfg_1, wm_data->h_init);
+		CAM_DBG(CAM_ISP, "WM:%d h_init 0x%x",
+			wm_data->index, reg_val_pair[*j-1]);
+	}
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->meta_cfg, wm_data->ubwc_meta_cfg);
+	CAM_DBG(CAM_ISP, "WM:%d meta stride 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->mode_cfg, wm_data->ubwc_mode_cfg);
+	CAM_DBG(CAM_ISP, "WM:%d ubwc_mode_cfg 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+		ubwc_regs->ctrl_2, wm_data->ubwc_ctrl_2);
+	CAM_DBG(CAM_ISP, "WM:%d ubwc_ctrl_2 0x%x",
+		wm_data->index, reg_val_pair[*j-1]);
+
+	switch (wm_data->format) {
+	case CAM_FORMAT_UBWC_TP10:
+		ubwc_bw_limit = (0x8 << 1) | BIT(0);
+		break;
+	case CAM_FORMAT_UBWC_NV12_4R:
+		ubwc_bw_limit = (0xB << 1) | BIT(0);
+		break;
+	default:
+		ubwc_bw_limit = 0;
+		break;
+	}
+
+	if (ubwc_bw_limit) {
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, *j,
+			ubwc_regs->bw_limit, ubwc_bw_limit);
+		CAM_DBG(CAM_ISP, "WM:%d ubwc bw limit 0x%x",
+			wm_data->index, ubwc_bw_limit);
+	}
+
+end:
+	return rc;
+}
+
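+/*
+ * cam_vfe_bus_ver3_update_wm()
+ *
+ * Build the CDM register/value pairs for a buffer update: image width,
+ * stride (only when changed or on first config), UBWC registers and meta
+ * address, per-frame image addresses, frame increment and WM enable.
+ */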
+static int cam_vfe_bus_ver3_update_wm(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver3_priv             *bus_priv;
+	struct cam_isp_hw_get_cmd_update         *update_buf;
+	struct cam_buf_io_cfg                    *io_cfg;
+	struct cam_vfe_bus_ver3_vfe_out_data     *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL;
+	struct cam_vfe_bus_ver3_reg_offset_ubwc_client *ubwc_client = NULL;
+	uint32_t *reg_val_pair;
+	uint32_t  i, j, k, size = 0;
+	uint32_t  frame_inc = 0, val;
+	uint32_t loop_size = 0;
+
+	bus_priv = (struct cam_vfe_bus_ver3_priv  *) priv;
+	update_buf =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *)
+		update_buf->res->res_priv;
+
+	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	if (update_buf->wm_update->num_buf != vfe_out_data->num_wm) {
+		CAM_ERR(CAM_ISP,
+			"Failed! Invalid number buffers:%d required:%d",
+			update_buf->wm_update->num_buf, vfe_out_data->num_wm);
+		return -EINVAL;
+	}
+
+	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+	io_cfg = update_buf->wm_update->io_cfg;
+
+	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+			CAM_ERR(CAM_ISP,
+				"reg_val_pair %d exceeds the array limit %zu",
+				j, MAX_REG_VAL_PAIR_SIZE);
+			return -ENOMEM;
+		}
+
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+		ubwc_client = wm_data->hw_regs->ubwc_regs;
+		/* update width register */
+		val = cam_io_r_mb(wm_data->common_data->mem_base +
+			wm_data->hw_regs->image_cfg_0);
+		/* mask previously written width but preserve height */
+		val = val & 0xFFFF0000;
+		val |= wm_data->width;
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->image_cfg_0, val);
+		CAM_DBG(CAM_ISP, "WM %d image height and width 0x%x",
+			wm_data->index, reg_val_pair[j-1]);
+
+		/* For initial configuration program all bus registers */
+		val = io_cfg->planes[i].plane_stride;
+		CAM_DBG(CAM_ISP, "before stride %d", val);
+		val = ALIGNUP(val, 16);
+		if (val != io_cfg->planes[i].plane_stride &&
+			val != wm_data->stride)
+			CAM_WARN(CAM_ISP, "Warning stride %u expected %u",
+				io_cfg->planes[i].plane_stride, val);
+
+		if ((wm_data->stride != val ||
+			!wm_data->init_cfg_done) && (wm_data->index < 23)) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->image_cfg_2,
+				io_cfg->planes[i].plane_stride);
+			wm_data->stride = val;
+			CAM_DBG(CAM_ISP, "WM %d image stride 0x%x",
+				wm_data->index, reg_val_pair[j-1]);
+		}
+
+		if (wm_data->en_ubwc) {
+			if (!wm_data->hw_regs->ubwc_regs) {
+				CAM_ERR(CAM_ISP,
+					"No UBWC register to configure.");
+				return -EINVAL;
+			}
+			if (wm_data->ubwc_updated) {
+				wm_data->ubwc_updated = false;
+				cam_vfe_bus_ver3_update_ubwc_regs(
+					wm_data, reg_val_pair, i, &j);
+			}
+
+			/* UBWC meta address */
+			cam_vfe_bus_ver3_update_ubwc_meta_addr(
+				reg_val_pair, &j,
+				wm_data->hw_regs->ubwc_regs,
+				update_buf->wm_update->image_buf[i]);
+			CAM_DBG(CAM_ISP, "WM %d ubwc meta addr 0x%llx",
+				wm_data->index,
+				update_buf->wm_update->image_buf[i]);
+		}
+
+		if (wm_data->en_ubwc) {
+			frame_inc = ALIGNUP(io_cfg->planes[i].plane_stride *
+			    io_cfg->planes[i].slice_height, 4096);
+			frame_inc += io_cfg->planes[i].meta_size;
+			CAM_DBG(CAM_ISP,
+				"WM %d frm %d: ht: %d stride %d meta: %d",
+				wm_data->index, frame_inc,
+				io_cfg->planes[i].slice_height,
+				io_cfg->planes[i].plane_stride,
+				io_cfg->planes[i].meta_size);
+		} else {
+			frame_inc = io_cfg->planes[i].plane_stride *
+				io_cfg->planes[i].slice_height;
+		}
+
+		if (wm_data->index < 3)
+			loop_size = wm_data->irq_subsample_period + 1;
+		else
+			loop_size = 1;
+
+		/* WM Image address */
+		for (k = 0; k < loop_size; k++) {
+			if (wm_data->en_ubwc)
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->image_addr,
+					update_buf->wm_update->image_buf[i] +
+					io_cfg->planes[i].meta_size +
+					k * frame_inc);
+			else
+				CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+					wm_data->hw_regs->image_addr,
+					update_buf->wm_update->image_buf[i] +
+					wm_data->offset + k * frame_inc);
+			CAM_DBG(CAM_ISP, "WM %d image address 0x%x",
+				wm_data->index, reg_val_pair[j-1]);
+		}
+
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->frame_incr, frame_inc);
+		CAM_DBG(CAM_ISP, "WM %d frame_inc %d",
+			wm_data->index, reg_val_pair[j-1]);
+
+
+		/* enable the WM */
+		CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+			wm_data->hw_regs->cfg,
+			wm_data->en_cfg);
+
+		/* set initial configuration done */
+		if (!wm_data->init_cfg_done)
+			wm_data->init_cfg_done = true;
+	}
+
+	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > update_buf->cmd.size) {
+		CAM_ERR(CAM_ISP,
+			"Failed! Buf size:%d insufficient, expected size:%d",
+			update_buf->cmd.size, size * 4);
+		return -ENOMEM;
+	}
+
+	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
+		update_buf->cmd.cmd_buf_addr, j/2, reg_val_pair);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	update_buf->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
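+/*
+ * cam_vfe_bus_ver3_update_hfr()
+ *
+ * Build CDM register/value pairs for HFR: framedrop pattern/period and
+ * IRQ subsample pattern/period are written only when they change or on
+ * the first configuration after acquire.
+ */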
+static int cam_vfe_bus_ver3_update_hfr(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver3_priv             *bus_priv;
+	struct cam_isp_hw_get_cmd_update         *update_hfr;
+	struct cam_vfe_bus_ver3_vfe_out_data     *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver3_wm_resource_data *wm_data = NULL;
+	struct cam_isp_port_hfr_config           *hfr_cfg = NULL;
+	uint32_t *reg_val_pair;
+	uint32_t  i, j, size = 0;
+
+	bus_priv = (struct cam_vfe_bus_ver3_priv  *) priv;
+	update_hfr =  (struct cam_isp_hw_get_cmd_update *) cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *)
+		update_hfr->res->res_priv;
+
+	if (!vfe_out_data || !vfe_out_data->cdm_util_ops) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	reg_val_pair = &vfe_out_data->common_data->io_buf_update[0];
+	hfr_cfg = update_hfr->hfr_update;
+
+	for (i = 0, j = 0; i < vfe_out_data->num_wm; i++) {
+		if (j >= (MAX_REG_VAL_PAIR_SIZE - MAX_BUF_UPDATE_REG_NUM * 2)) {
+			CAM_ERR(CAM_ISP,
+				"reg_val_pair %d exceeds the array limit %zu",
+				j, MAX_REG_VAL_PAIR_SIZE);
+			return -ENOMEM;
+		}
+
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+
+		if (wm_data->index > 22 && hfr_cfg->subsample_period > 3) {
+			CAM_ERR(CAM_ISP,
+				"RDI doesn't support irq subsample period %d",
+				hfr_cfg->subsample_period);
+			return -EINVAL;
+		}
+
+		if ((wm_data->framedrop_pattern !=
+			hfr_cfg->framedrop_pattern) ||
+			!wm_data->hfr_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->framedrop_pattern,
+				hfr_cfg->framedrop_pattern);
+			wm_data->framedrop_pattern = hfr_cfg->framedrop_pattern;
+			CAM_DBG(CAM_ISP, "WM:%d framedrop pattern 0x%x",
+				wm_data->index, wm_data->framedrop_pattern);
+		}
+
+		if (wm_data->framedrop_period != hfr_cfg->framedrop_period ||
+			!wm_data->hfr_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->framedrop_period,
+				hfr_cfg->framedrop_period);
+			wm_data->framedrop_period = hfr_cfg->framedrop_period;
+			CAM_DBG(CAM_ISP, "WM:%d framedrop period 0x%x",
+				wm_data->index, wm_data->framedrop_period);
+		}
+
+		if (wm_data->irq_subsample_period != hfr_cfg->subsample_period
+			|| !wm_data->hfr_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->irq_subsample_period,
+				hfr_cfg->subsample_period);
+			wm_data->irq_subsample_period =
+				hfr_cfg->subsample_period;
+			CAM_DBG(CAM_ISP, "WM:%d irq subsample period 0x%x",
+				wm_data->index, wm_data->irq_subsample_period);
+		}
+
+		if (wm_data->irq_subsample_pattern != hfr_cfg->subsample_pattern
+			|| !wm_data->hfr_cfg_done) {
+			CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j,
+				wm_data->hw_regs->irq_subsample_pattern,
+				hfr_cfg->subsample_pattern);
+			wm_data->irq_subsample_pattern =
+				hfr_cfg->subsample_pattern;
+			CAM_DBG(CAM_ISP, "WM:%d irq subsample pattern 0x%x",
+				wm_data->index, wm_data->irq_subsample_pattern);
+		}
+
+		/* set initial configuration done */
+		if (!wm_data->hfr_cfg_done)
+			wm_data->hfr_cfg_done = true;
+	}
+
+	size = vfe_out_data->cdm_util_ops->cdm_required_size_reg_random(j/2);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	if ((size * 4) > update_hfr->cmd.size) {
+		CAM_ERR(CAM_ISP,
+			"Failed! Buf size:%d insufficient, expected size:%d",
+			update_hfr->cmd.size, size * 4);
+		return -ENOMEM;
+	}
+
+	vfe_out_data->cdm_util_ops->cdm_write_regrandom(
+		update_hfr->cmd.cmd_buf_addr, j/2, reg_val_pair);
+
+	/* cdm util returns dwords, need to convert to bytes */
+	update_hfr->cmd.used_bytes = size * 4;
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_update_ubwc_config(void *cmd_args)
+{
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_update_stripe_cfg(void *priv, void *cmd_args,
+	uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver3_priv                *bus_priv;
+	struct cam_isp_hw_dual_isp_update_args      *stripe_args;
+	struct cam_vfe_bus_ver3_vfe_out_data        *vfe_out_data = NULL;
+	struct cam_vfe_bus_ver3_wm_resource_data    *wm_data = NULL;
+	struct cam_isp_dual_stripe_config           *stripe_config;
+	uint32_t outport_id, ports_plane_idx, i;
+
+	bus_priv = (struct cam_vfe_bus_ver3_priv  *) priv;
+	stripe_args = (struct cam_isp_hw_dual_isp_update_args *)cmd_args;
+
+	vfe_out_data = (struct cam_vfe_bus_ver3_vfe_out_data *)
+		stripe_args->res->res_priv;
+
+	if (!vfe_out_data) {
+		CAM_ERR(CAM_ISP, "Failed! Invalid data");
+		return -EINVAL;
+	}
+
+	outport_id = stripe_args->res->res_id & 0xFF;
+	if (stripe_args->res->res_id < CAM_ISP_IFE_OUT_RES_BASE ||
+		stripe_args->res->res_id >= CAM_ISP_IFE_OUT_RES_MAX)
+		return 0;
+
+	ports_plane_idx = (stripe_args->split_id *
+		(stripe_args->dual_cfg->num_ports * CAM_PACKET_MAX_PLANES)) +
+		(outport_id * CAM_PACKET_MAX_PLANES);
+	for (i = 0; i < vfe_out_data->num_wm; i++) {
+		wm_data = vfe_out_data->wm_res[i]->res_priv;
+		stripe_config = (struct cam_isp_dual_stripe_config  *)
+			&stripe_args->dual_cfg->stripes[ports_plane_idx + i];
+		wm_data->width = stripe_config->width;
+		wm_data->offset = stripe_config->offset;
+		CAM_DBG(CAM_ISP, "id:%x WM:%d width:0x%x offset:%x",
+			stripe_args->res->res_id, wm_data->index,
+			wm_data->width, wm_data->offset);
+	}
+
+	return 0;
+}
+
+static int cam_vfe_bus_ver3_start_hw(void *hw_priv,
+	void *start_hw_args, uint32_t arg_size)
+{
+	return cam_vfe_bus_ver3_start_vfe_out(hw_priv);
+}
+
+static int cam_vfe_bus_ver3_stop_hw(void *hw_priv,
+	void *stop_hw_args, uint32_t arg_size)
+{
+	return cam_vfe_bus_ver3_stop_vfe_out(hw_priv);
+}
+
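+/*
+ * cam_vfe_bus_ver3_init_hw()
+ *
+ * Subscribe the bus IRQ on the VFE top IRQ controller (bit 7), register
+ * the bus error IRQ handler when a tasklet is available, override clock
+ * gating at the bus input and clear the test bus control register.
+ */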
+static int cam_vfe_bus_ver3_init_hw(void *hw_priv,
+	void *init_hw_args, uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver3_priv    *bus_priv = hw_priv;
+	uint32_t                         top_irq_reg_mask[2] = {0};
+
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "Invalid args");
+		return -EINVAL;
+	}
+
+	top_irq_reg_mask[0] = (1 << 7);
+
+	bus_priv->irq_handle = cam_irq_controller_subscribe_irq(
+		bus_priv->common_data.vfe_irq_controller,
+		CAM_IRQ_PRIORITY_2,
+		top_irq_reg_mask,
+		bus_priv,
+		cam_vfe_bus_ver3_handle_irq,
+		NULL,
+		NULL,
+		NULL);
+
+	if (bus_priv->irq_handle <= 0) {
+		CAM_ERR(CAM_ISP, "Failed to subscribe BUS IRQ");
+		return -EFAULT;
+	}
+
+	if (bus_priv->tasklet_info != NULL) {
+		bus_priv->error_irq_handle = cam_irq_controller_subscribe_irq(
+			bus_priv->common_data.bus_irq_controller,
+			CAM_IRQ_PRIORITY_0,
+			bus_error_irq_mask,
+			bus_priv,
+			cam_vfe_bus_ver3_err_irq_top_half,
+			cam_vfe_bus_ver3_err_irq_bottom_half,
+			bus_priv->tasklet_info,
+			&tasklet_bh_api);
+
+		if (bus_priv->error_irq_handle <= 0) {
+			CAM_ERR(CAM_ISP, "Failed to subscribe BUS Error IRQ");
+			return -EFAULT;
+		}
+	}
+
+	/* no clock gating at bus input */
+	cam_io_w_mb(0xFFFFF, bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->cgc_ovd);
+
+	/* BUS_WR_TEST_BUS_CTRL */
+	cam_io_w_mb(0x0, bus_priv->common_data.mem_base +
+		bus_priv->common_data.common_reg->test_bus_ctrl);
+
+	return 0;
+}
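The hard-coded (1 << 7) above selects the single bit the bus block drives in word 0 of the VFE top-level IRQ status, which is why the subscription mask is built this way; a small illustration using the kernel's BIT() helper:

	uint32_t top_irq_reg_mask[2] = { 0 };

	top_irq_reg_mask[0] |= BIT(7);  /* bus IRQ bit in top status word 0    */
	/* the mask is then handed to cam_irq_controller_subscribe_irq()       */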
+
+static int cam_vfe_bus_ver3_deinit_hw(void *hw_priv,
+	void *deinit_hw_args, uint32_t arg_size)
+{
+	struct cam_vfe_bus_ver3_priv    *bus_priv = hw_priv;
+	int                              rc = 0, i;
+
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "Error: Invalid args");
+		return -EINVAL;
+	}
+
+	if (bus_priv->error_irq_handle) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			bus_priv->common_data.bus_irq_controller,
+			bus_priv->error_irq_handle);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+				"Failed to unsubscribe error irq rc=%d", rc);
+
+		bus_priv->error_irq_handle = 0;
+	}
+
+	if (bus_priv->irq_handle) {
+		rc = cam_irq_controller_unsubscribe_irq(
+			bus_priv->common_data.vfe_irq_controller,
+			bus_priv->irq_handle);
+		if (rc)
+			CAM_ERR(CAM_ISP,
+				"Failed to unsubscribe irq rc=%d", rc);
+
+		bus_priv->irq_handle = 0;
+	}
+
+	INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+	for (i = 0; i < CAM_VFE_BUS_VER3_PAYLOAD_MAX; i++) {
+		INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+		list_add_tail(&bus_priv->common_data.evt_payload[i].list,
+			&bus_priv->common_data.free_payload_list);
+	}
+
+	return rc;
+}
+
+static int __cam_vfe_bus_ver3_process_cmd(void *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	return cam_vfe_bus_ver3_process_cmd(priv, cmd_type, cmd_args, arg_size);
+}
+
+static int cam_vfe_bus_ver3_process_cmd(
+	struct cam_isp_resource_node *priv,
+	uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
+{
+	int rc = -EINVAL;
+	struct cam_vfe_bus_ver3_priv		 *bus_priv;
+
+	if (!priv || !cmd_args) {
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid input arguments");
+		return -EINVAL;
+	}
+
+	switch (cmd_type) {
+	case CAM_ISP_HW_CMD_GET_BUF_UPDATE:
+		rc = cam_vfe_bus_ver3_update_wm(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_HFR_UPDATE:
+		rc = cam_vfe_bus_ver3_update_hfr(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_GET_SECURE_MODE:
+		rc = cam_vfe_bus_ver3_get_secure_mode(priv, cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_STRIPE_UPDATE:
+		rc = cam_vfe_bus_ver3_update_stripe_cfg(priv,
+			cmd_args, arg_size);
+		break;
+	case CAM_ISP_HW_CMD_STOP_BUS_ERR_IRQ:
+		bus_priv = (struct cam_vfe_bus_ver3_priv  *) priv;
+		if (bus_priv->error_irq_handle) {
+			CAM_DBG(CAM_ISP, "Mask off bus error irq handler");
+			rc = cam_irq_controller_unsubscribe_irq(
+				bus_priv->common_data.bus_irq_controller,
+				bus_priv->error_irq_handle);
+			if (rc)
+				CAM_ERR(CAM_ISP,
+					"Failed to unsubscribe error irq rc=%d",
+					rc);
+
+			bus_priv->error_irq_handle = 0;
+		}
+		break;
+	case CAM_ISP_HW_CMD_UBWC_UPDATE:
+		rc = cam_vfe_bus_ver3_update_ubwc_config(cmd_args);
+		break;
+	default:
+		CAM_ERR_RATE_LIMIT(CAM_ISP, "Invalid bus process command:%d",
+			cmd_type);
+		break;
+	}
+
+	return rc;
+}
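For context, __cam_vfe_bus_ver3_process_cmd() is published through hw_ops.process_cmd in cam_vfe_bus_ver3_init() below, so the switch above is normally reached roughly like this; priv and buf_update are stand-ins for whatever private data and update-argument structure the ISP HW manager actually supplies:

	rc = vfe_bus->hw_ops.process_cmd(priv,
		CAM_ISP_HW_CMD_GET_BUF_UPDATE, &buf_update, sizeof(buf_update));
	if (rc)
		CAM_ERR(CAM_ISP, "Buf update failed rc=%d", rc);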
+
+int cam_vfe_bus_ver3_init(
+	struct cam_hw_soc_info               *soc_info,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *vfe_irq_controller,
+	struct cam_vfe_bus                  **vfe_bus)
+{
+	int i, rc = 0;
+	struct cam_vfe_bus_ver3_priv    *bus_priv = NULL;
+	struct cam_vfe_bus              *vfe_bus_local;
+	struct cam_vfe_bus_ver3_hw_info *ver3_hw_info = bus_hw_info;
+
+	CAM_DBG(CAM_ISP, "Enter");
+
+	if (!soc_info || !hw_intf || !bus_hw_info || !vfe_irq_controller) {
+		CAM_ERR(CAM_ISP,
+			"Invalid params soc_info:%pK hw_intf:%pK hw_info:%pK",
+			soc_info, hw_intf, bus_hw_info);
+		CAM_ERR(CAM_ISP, "controller: %pK", vfe_irq_controller);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	vfe_bus_local = kzalloc(sizeof(struct cam_vfe_bus), GFP_KERNEL);
+	if (!vfe_bus_local) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	bus_priv = kzalloc(sizeof(struct cam_vfe_bus_ver3_priv),
+		GFP_KERNEL);
+	if (!bus_priv) {
+		CAM_DBG(CAM_ISP, "Failed to alloc for vfe_bus_priv");
+		rc = -ENOMEM;
+		goto free_bus_local;
+	}
+	vfe_bus_local->bus_priv = bus_priv;
+
+	bus_priv->num_client                     = ver3_hw_info->num_client;
+	bus_priv->num_out                        = ver3_hw_info->num_out;
+	bus_priv->common_data.num_sec_out        = 0;
+	bus_priv->common_data.secure_mode        = CAM_SECURE_MODE_NON_SECURE;
+	bus_priv->common_data.core_index         = soc_info->index;
+	bus_priv->common_data.mem_base           =
+		CAM_SOC_GET_REG_MAP_START(soc_info, VFE_CORE_BASE_IDX);
+	bus_priv->common_data.hw_intf            = hw_intf;
+	bus_priv->common_data.vfe_irq_controller = vfe_irq_controller;
+	bus_priv->common_data.common_reg         = &ver3_hw_info->common_reg;
+
+	mutex_init(&bus_priv->common_data.bus_mutex);
+
+	rc = cam_irq_controller_init(drv_name, bus_priv->common_data.mem_base,
+		&ver3_hw_info->common_reg.irq_reg_info,
+		&bus_priv->common_data.bus_irq_controller);
+	if (rc) {
+		CAM_ERR(CAM_ISP, "cam_irq_controller_init failed");
+		goto free_bus_priv;
+	}
+
+	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+	for (i = 0; i < bus_priv->num_client; i++) {
+		rc = cam_vfe_bus_ver3_init_wm_resource(i, bus_priv, bus_hw_info,
+			&bus_priv->bus_client[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init WM failed rc=%d", rc);
+			goto deinit_wm;
+		}
+	}
+
+	for (i = 0; i < CAM_VFE_BUS_VER3_COMP_GRP_MAX; i++) {
+		rc = cam_vfe_bus_ver3_init_comp_grp(i, bus_priv, bus_hw_info,
+			&bus_priv->comp_grp[i]);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init Comp Grp failed rc=%d", rc);
+			goto deinit_comp_grp;
+		}
+	}
+
+	for (i = 0; i < bus_priv->num_out; i++) {
+		rc = cam_vfe_bus_ver3_init_vfe_out_resource(i, bus_priv,
+			bus_hw_info);
+		if (rc < 0) {
+			CAM_ERR(CAM_ISP, "Init VFE Out failed rc=%d", rc);
+			goto deinit_vfe_out;
+		}
+	}
+
+	spin_lock_init(&bus_priv->common_data.spin_lock);
+	INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+	for (i = 0; i < CAM_VFE_BUS_VER3_PAYLOAD_MAX; i++) {
+		INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+		list_add_tail(&bus_priv->common_data.evt_payload[i].list,
+			&bus_priv->common_data.free_payload_list);
+	}
+
+	vfe_bus_local->hw_ops.reserve      = cam_vfe_bus_ver3_acquire_vfe_out;
+	vfe_bus_local->hw_ops.release      = cam_vfe_bus_ver3_release_vfe_out;
+	vfe_bus_local->hw_ops.start        = cam_vfe_bus_ver3_start_hw;
+	vfe_bus_local->hw_ops.stop         = cam_vfe_bus_ver3_stop_hw;
+	vfe_bus_local->hw_ops.init         = cam_vfe_bus_ver3_init_hw;
+	vfe_bus_local->hw_ops.deinit       = cam_vfe_bus_ver3_deinit_hw;
+	vfe_bus_local->top_half_handler    = cam_vfe_bus_ver3_handle_irq;
+	vfe_bus_local->bottom_half_handler = NULL;
+	vfe_bus_local->hw_ops.process_cmd  = __cam_vfe_bus_ver3_process_cmd;
+
+	*vfe_bus = vfe_bus_local;
+
+	CAM_DBG(CAM_ISP, "Exit");
+	return rc;
+
+deinit_vfe_out:
+	if (i < 0)
+		i = CAM_VFE_BUS_VER3_VFE_OUT_MAX;
+	for (--i; i >= 0; i--)
+		cam_vfe_bus_ver3_deinit_vfe_out_resource(&bus_priv->vfe_out[i]);
+
+deinit_comp_grp:
+	if (i < 0)
+		i = CAM_VFE_BUS_VER3_COMP_GRP_MAX;
+	for (--i; i >= 0; i--)
+		cam_vfe_bus_ver3_deinit_comp_grp(&bus_priv->comp_grp[i]);
+
+deinit_wm:
+	if (i < 0)
+		i = bus_priv->num_client;
+	for (--i; i >= 0; i--)
+		cam_vfe_bus_ver3_deinit_wm_resource(&bus_priv->bus_client[i]);
+
+free_bus_priv:
+	kfree(vfe_bus_local->bus_priv);
+
+free_bus_local:
+	kfree(vfe_bus_local);
+
+end:
+	return rc;
+}
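The error path above follows the usual kernel unwind-by-label idiom: each label first normalizes the loop index (it is -1 after a fully unwound stage, or points at the failed element when jumped to directly) and then releases everything initialized so far before falling through to the previous stage. A stripped-down sketch of the same pattern; init_a/init_b, their deinit counterparts and the counts are illustrative only:

	int i, rc = 0;

	for (i = 0; i < NUM_A; i++) {
		rc = init_a(i);
		if (rc)
			goto undo_a;
	}
	for (i = 0; i < NUM_B; i++) {
		rc = init_b(i);
		if (rc)
			goto undo_b;
	}
	return 0;

undo_b:
	if (i < 0)              /* only relevant when fallen into           */
		i = NUM_B;
	for (--i; i >= 0; i--)
		deinit_b(i);
undo_a:
	if (i < 0)              /* reached via fall-through with i == -1    */
		i = NUM_A;
	for (--i; i >= 0; i--)
		deinit_a(i);
	return rc;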
+
+int cam_vfe_bus_ver3_deinit(
+	struct cam_vfe_bus                  **vfe_bus)
+{
+	int i, rc = 0;
+	struct cam_vfe_bus_ver3_priv    *bus_priv = NULL;
+	struct cam_vfe_bus              *vfe_bus_local;
+
+	if (!vfe_bus || !*vfe_bus) {
+		CAM_ERR(CAM_ISP, "Invalid input");
+		return -EINVAL;
+	}
+	vfe_bus_local = *vfe_bus;
+
+	bus_priv = vfe_bus_local->bus_priv;
+	if (!bus_priv) {
+		CAM_ERR(CAM_ISP, "bus_priv is NULL");
+		rc = -ENODEV;
+		goto free_bus_local;
+	}
+
+	INIT_LIST_HEAD(&bus_priv->common_data.free_payload_list);
+	for (i = 0; i < CAM_VFE_BUS_VER3_PAYLOAD_MAX; i++)
+		INIT_LIST_HEAD(&bus_priv->common_data.evt_payload[i].list);
+
+	for (i = 0; i < bus_priv->num_client; i++) {
+		rc = cam_vfe_bus_ver3_deinit_wm_resource(
+			&bus_priv->bus_client[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit WM failed rc=%d", rc);
+	}
+
+	for (i = 0; i < CAM_VFE_BUS_VER3_COMP_GRP_MAX; i++) {
+		rc = cam_vfe_bus_ver3_deinit_comp_grp(&bus_priv->comp_grp[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit Comp Grp failed rc=%d", rc);
+	}
+
+	for (i = 0; i < CAM_VFE_BUS_VER3_VFE_OUT_MAX; i++) {
+		rc = cam_vfe_bus_ver3_deinit_vfe_out_resource(
+			&bus_priv->vfe_out[i]);
+		if (rc < 0)
+			CAM_ERR(CAM_ISP,
+				"Deinit VFE Out failed rc=%d", rc);
+	}
+
+	INIT_LIST_HEAD(&bus_priv->free_comp_grp);
+	INIT_LIST_HEAD(&bus_priv->used_comp_grp);
+
+	rc = cam_irq_controller_deinit(
+		&bus_priv->common_data.bus_irq_controller);
+	if (rc)
+		CAM_ERR(CAM_ISP,
+			"Deinit IRQ Controller failed rc=%d", rc);
+
+	mutex_destroy(&bus_priv->common_data.bus_mutex);
+	kfree(vfe_bus_local->bus_priv);
+
+free_bus_local:
+	kfree(vfe_bus_local);
+
+	*vfe_bus = NULL;
+
+	return rc;
+}
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h
new file mode 100644
index 0000000..ece879a
--- /dev/null
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.h
@@ -0,0 +1,215 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+
+#ifndef _CAM_VFE_BUS_VER3_H_
+#define _CAM_VFE_BUS_VER3_H_
+
+#include "cam_irq_controller.h"
+#include "cam_vfe_bus.h"
+
+#define CAM_VFE_BUS_VER3_MAX_CLIENTS     26
+#define CAM_VFE_BUS_VER3_MAX_SUB_GRPS     6
+
+enum cam_vfe_bus_ver3_vfe_core_id {
+	CAM_VFE_BUS_VER3_VFE_CORE_0,
+	CAM_VFE_BUS_VER3_VFE_CORE_1,
+	CAM_VFE_BUS_VER3_VFE_CORE_MAX,
+};
+
+enum cam_vfe_bus_ver3_src_grp {
+	CAM_VFE_BUS_VER3_SRC_GRP_0,
+	CAM_VFE_BUS_VER3_SRC_GRP_1,
+	CAM_VFE_BUS_VER3_SRC_GRP_2,
+	CAM_VFE_BUS_VER3_SRC_GRP_3,
+	CAM_VFE_BUS_VER3_SRC_GRP_4,
+	CAM_VFE_BUS_VER3_SRC_GRP_5,
+	CAM_VFE_BUS_VER3_SRC_GRP_MAX,
+};
+
+enum cam_vfe_bus_ver3_comp_grp_type {
+	CAM_VFE_BUS_VER3_COMP_GRP_0,
+	CAM_VFE_BUS_VER3_COMP_GRP_1,
+	CAM_VFE_BUS_VER3_COMP_GRP_2,
+	CAM_VFE_BUS_VER3_COMP_GRP_3,
+	CAM_VFE_BUS_VER3_COMP_GRP_4,
+	CAM_VFE_BUS_VER3_COMP_GRP_5,
+	CAM_VFE_BUS_VER3_COMP_GRP_6,
+	CAM_VFE_BUS_VER3_COMP_GRP_7,
+	CAM_VFE_BUS_VER3_COMP_GRP_8,
+	CAM_VFE_BUS_VER3_COMP_GRP_9,
+	CAM_VFE_BUS_VER3_COMP_GRP_10,
+	CAM_VFE_BUS_VER3_COMP_GRP_11,
+	CAM_VFE_BUS_VER3_COMP_GRP_12,
+	CAM_VFE_BUS_VER3_COMP_GRP_13,
+	CAM_VFE_BUS_VER3_COMP_GRP_MAX,
+};
+
+enum cam_vfe_bus_ver3_vfe_out_type {
+	CAM_VFE_BUS_VER3_VFE_OUT_RDI0,
+	CAM_VFE_BUS_VER3_VFE_OUT_RDI1,
+	CAM_VFE_BUS_VER3_VFE_OUT_RDI2,
+	CAM_VFE_BUS_VER3_VFE_OUT_RDI3,
+	CAM_VFE_BUS_VER3_VFE_OUT_FULL,
+	CAM_VFE_BUS_VER3_VFE_OUT_DS4,
+	CAM_VFE_BUS_VER3_VFE_OUT_DS16,
+	CAM_VFE_BUS_VER3_VFE_OUT_RAW_DUMP,
+	CAM_VFE_BUS_VER3_VFE_OUT_FD,
+	CAM_VFE_BUS_VER3_VFE_OUT_PDAF,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BE,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_HDR_BHIST,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_TL_BG,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_BF,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_AWB_BG,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_BHIST,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_RS,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_CS,
+	CAM_VFE_BUS_VER3_VFE_OUT_STATS_IHIST,
+	CAM_VFE_BUS_VER3_VFE_OUT_FULL_DISP,
+	CAM_VFE_BUS_VER3_VFE_OUT_DS4_DISP,
+	CAM_VFE_BUS_VER3_VFE_OUT_DS16_DISP,
+	CAM_VFE_BUS_VER3_VFE_OUT_2PD,
+	CAM_VFE_BUS_VER3_VFE_OUT_LCR,
+	CAM_VFE_BUS_VER3_VFE_OUT_MAX,
+};
+
+/*
+ * struct cam_vfe_bus_ver3_reg_offset_common:
+ *
+ * @Brief:        Common registers across all BUS Clients
+ */
+struct cam_vfe_bus_ver3_reg_offset_common {
+	uint32_t hw_version;
+	uint32_t cgc_ovd;
+	uint32_t comp_cfg_0;
+	uint32_t comp_cfg_1;
+	uint32_t if_frameheader_cfg[CAM_VFE_BUS_VER3_MAX_SUB_GRPS];
+	uint32_t ubwc_static_ctrl;
+	uint32_t pwr_iso_cfg;
+	uint32_t overflow_status_clear;
+	uint32_t ccif_violation_status;
+	uint32_t overflow_status;
+	uint32_t image_size_violation_status;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t test_bus_ctrl;
+	struct cam_irq_controller_reg_info irq_reg_info;
+};
+
+/*
+ * struct cam_vfe_bus_ver3_reg_offset_ubwc_client:
+ *
+ * @Brief:        UBWC register offsets for BUS Clients
+ */
+struct cam_vfe_bus_ver3_reg_offset_ubwc_client {
+	uint32_t meta_addr;
+	uint32_t meta_cfg;
+	uint32_t mode_cfg;
+	uint32_t stats_ctrl;
+	uint32_t ctrl_2;
+	uint32_t bw_limit;
+};
+
+/*
+ * struct cam_vfe_bus_ver3_reg_offset_bus_client:
+ *
+ * @Brief:        Register offsets for BUS Clients
+ */
+struct cam_vfe_bus_ver3_reg_offset_bus_client {
+	uint32_t cfg;
+	uint32_t image_addr;
+	uint32_t frame_incr;
+	uint32_t image_cfg_0;
+	uint32_t image_cfg_1;
+	uint32_t image_cfg_2;
+	uint32_t packer_cfg;
+	uint32_t frame_header_addr;
+	uint32_t frame_header_incr;
+	uint32_t frame_header_cfg;
+	uint32_t irq_subsample_period;
+	uint32_t irq_subsample_pattern;
+	uint32_t framedrop_period;
+	uint32_t framedrop_pattern;
+	uint32_t burst_limit;
+	uint32_t system_cache_cfg;
+	void    *ubwc_regs;
+	uint32_t addr_status_0;
+	uint32_t addr_status_1;
+	uint32_t addr_status_2;
+	uint32_t addr_status_3;
+	uint32_t debug_status_cfg;
+	uint32_t debug_status_0;
+	uint32_t debug_status_1;
+	uint32_t source_group;
+	uint32_t comp_group;
+};
+
+/*
+ * struct cam_vfe_bus_ver3_vfe_out_hw_info:
+ *
+ * @Brief:        HW capability of VFE Bus Client
+ */
+struct cam_vfe_bus_ver3_vfe_out_hw_info {
+	enum cam_vfe_bus_ver3_vfe_out_type  vfe_out_type;
+	uint32_t                            max_width;
+	uint32_t                            max_height;
+};
+
+/*
+ * struct cam_vfe_bus_ver3_hw_info:
+ *
+ * @Brief:            HW register info for entire Bus
+ *
+ * @common_reg:       Common register details
+ * @bus_client_reg:   Bus client register info
+ * @comp_reg_grp:     Composite group register info
+ * @vfe_out_hw_info:  VFE output capability
+ */
+struct cam_vfe_bus_ver3_hw_info {
+	struct cam_vfe_bus_ver3_reg_offset_common common_reg;
+	uint32_t num_client;
+	struct cam_vfe_bus_ver3_reg_offset_bus_client
+		bus_client_reg[CAM_VFE_BUS_VER3_MAX_CLIENTS];
+	uint32_t num_out;
+	struct cam_vfe_bus_ver3_vfe_out_hw_info
+		vfe_out_hw_info[CAM_VFE_BUS_VER3_VFE_OUT_MAX];
+};
+
+/*
+ * cam_vfe_bus_ver3_init()
+ *
+ * @Brief:                   Initialize Bus layer
+ *
+ * @soc_info:                Soc Information for the associated HW
+ * @hw_intf:                 HW Interface of HW to which this resource belongs
+ * @bus_hw_info:             BUS HW info that contains details of BUS registers
+ * @vfe_irq_controller:      VFE IRQ Controller to use for subscribing to Top
+ *                           level IRQs
+ * @vfe_bus:                 Pointer to vfe_bus structure which will be filled
+ *                           and returned on successful initialize
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_bus_ver3_init(
+	struct cam_hw_soc_info               *soc_info,
+	struct cam_hw_intf                   *hw_intf,
+	void                                 *bus_hw_info,
+	void                                 *vfe_irq_controller,
+	struct cam_vfe_bus                  **vfe_bus);
+
+/*
+ * cam_vfe_bus_ver3_deinit()
+ *
+ * @Brief:                   Deinitialize Bus layer
+ *
+ * @vfe_bus:                 Pointer to vfe_bus structure to deinitialize
+ *
+ * @Return:                  0: Success
+ *                           Non-zero: Failure
+ */
+int cam_vfe_bus_ver3_deinit(struct cam_vfe_bus     **vfe_bus);
+
+#endif /* _CAM_VFE_BUS_VER3_H_ */
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
index 7c2ce0e..71228ba 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/include/cam_vfe_bus.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef _CAM_VFE_BUS_H_
@@ -12,8 +12,19 @@
 
 #define CAM_VFE_BUS_VER_1_0             0x1000
 #define CAM_VFE_BUS_VER_2_0             0x2000
+#define CAM_VFE_BUS_VER_3_0             0x3000
+
 #define CAM_VFE_BUS_RD_VER_4_0          0x4000
 
+#define CAM_VFE_ADD_REG_VAL_PAIR(buf_array, index, offset, val)    \
+	do {                                               \
+		(buf_array)[(index)++] = (offset);         \
+		(buf_array)[(index)++] = (val);            \
+	} while (0)
+
+#define ALIGNUP(value, alignment) \
+	(((value) + (alignment) - 1) / (alignment) * (alignment))
+
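A hedged usage sketch of the two helpers added above, in the way the ver3 bus code uses them when packing CDM register writes; the register offsets and dimensions here are invented for illustration:

	uint32_t reg_val_pair[8];
	uint32_t j = 0, width = 640, stride;

	/* each call appends an {offset, value} pair and advances j by 2       */
	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j, 0xAC00, width);
	CAM_VFE_ADD_REG_VAL_PAIR(reg_val_pair, j, 0xAC04, 480);

	/* ALIGNUP rounds up to the next multiple: ALIGNUP(100, 16) == 112     */
	stride = ALIGNUP(width, 16);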
 enum cam_vfe_bus_plane_type {
 	PLANE_Y,
 	PLANE_C,
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 7c36b9e..ac8c244 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -466,7 +466,8 @@
 	}
 
 	ch_ctx = &gsi_ctx->chan[ch_id];
-	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI))
+	if (WARN_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ch_ctx->props.prot != GSI_CHAN_PROT_GCI))
 		return;
 
 	if (evt->type != GSI_XFER_COMPL_TYPE_GCI) {
@@ -493,7 +494,10 @@
 
 	ch_ctx->stats.completed++;
 
-	notify->xfer_user_data = ch_ctx->user_data[rp_idx];
+	WARN_ON(!ch_ctx->user_data[rp_idx].valid);
+	notify->xfer_user_data = ch_ctx->user_data[rp_idx].p;
+	ch_ctx->user_data[rp_idx].valid = false;
+
 	notify->chan_user_data = ch_ctx->props.chan_user_data;
 	notify->evt_id = evt->code;
 	notify->bytes_xfered = evt->len;
@@ -525,6 +529,7 @@
 {
 	uint32_t val;
 
+	ctx->ring.wp = ctx->ring.wp_local;
 	val = (ctx->ring.wp_local &
 			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_BMSK) <<
 			GSI_EE_n_EV_CH_k_DOORBELL_0_WRITE_PTR_LSB_SHFT;
@@ -1485,6 +1490,55 @@
 	return GSI_STATUS_SUCCESS;
 }
 
+/**
+ * gsi_cleanup_xfer_user_data: clean up the user data array using a callback
+ *	passed by the IPA driver. This has to be done in GSI since only GSI
+ *	knows which TREs are still in use. However, IPA is the one that does
+ *	the cleaning, so IPA passes a callback and GSI invokes it with the
+ *	relevant parameters.
+ *
+ * @chan_hdl: handle of the GSI channel whose user data array is to be cleaned
+ * @cleanup_cb: callback used to clean the user data array. Takes 2 inputs:
+ *	@chan_user_data: ipa_sys_context of the GSI channel
+ *	@xfer_user_data: user data array element (rx_pkt wrapper)
+ *
+ * Returns: 0 on success, negative on failure
+ */
+static int gsi_cleanup_xfer_user_data(unsigned long chan_hdl,
+	void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data))
+{
+	struct gsi_chan_ctx *ctx;
+	uint64_t i;
+	uint16_t rp_idx;
+
+	ctx = &gsi_ctx->chan[chan_hdl];
+	if (ctx->state != GSI_CHAN_STATE_ALLOCATED) {
+		GSIERR("bad state %d\n", ctx->state);
+		return -GSI_STATUS_UNSUPPORTED_OP;
+	}
+
+	/* for coalescing, traverse the whole array */
+	if (ctx->props.prot == GSI_CHAN_PROT_GCI) {
+		size_t user_data_size =
+			ctx->ring.max_num_elem + 1 + GSI_VEID_MAX;
+		for (i = 0; i < user_data_size; i++) {
+			if (ctx->user_data[i].valid)
+				cleanup_cb(ctx->props.chan_user_data,
+					ctx->user_data[i].p);
+		}
+	} else {
+		/* for non-coalescing, clean between RP and WP */
+		while (ctx->ring.rp_local != ctx->ring.wp_local) {
+			rp_idx = gsi_find_idx_from_addr(&ctx->ring,
+				ctx->ring.rp_local);
+			WARN_ON(!ctx->user_data[rp_idx].valid);
+			cleanup_cb(ctx->props.chan_user_data,
+				ctx->user_data[rp_idx].p);
+			gsi_incr_ring_rp(&ctx->ring);
+		}
+	}
+	return 0;
+}
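A minimal sketch of a cleanup callback a client could hand to GSI through the cleanup_cb channel property (the IPA implementation, free_rx_pkt(), appears later in this patch; the types and helper below are illustrative names only):

static void my_cleanup_cb(void *chan_user_data, void *xfer_user_data)
{
	struct my_chan_ctx *ctx = chan_user_data;  /* per-channel context    */
	struct my_rx_buf *buf = xfer_user_data;    /* per-transfer wrapper   */

	my_unmap_and_free(ctx, buf);               /* release DMA + memory   */
}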
+
 int gsi_alloc_evt_ring(struct gsi_evt_ring_props *props, unsigned long dev_hdl,
 		unsigned long *evt_ring_hdl)
 {
@@ -2163,7 +2217,8 @@
 	int ee;
 	enum gsi_ch_cmd_opcode op = GSI_CH_ALLOCATE;
 	uint8_t erindex;
-	void **user_data;
+	struct gsi_user_data *user_data;
+	size_t user_data_size;
 
 	if (!gsi_ctx) {
 		pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
@@ -2187,10 +2242,11 @@
 			return -GSI_STATUS_INVALID_PARAMS;
 		}
 
-		if (props->prot != GSI_CHAN_PROT_GCI &&
-			atomic_read(
+		if (atomic_read(
 			&gsi_ctx->evtr[props->evt_ring_hdl].chan_ref_cnt) &&
-			gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive) {
+			gsi_ctx->evtr[props->evt_ring_hdl].props.exclusive &&
+			gsi_ctx->evtr[props->evt_ring_hdl].chan->props.prot !=
+			GSI_CHAN_PROT_GCI) {
 			GSIERR("evt ring=%lu exclusively used by ch_hdl=%pK\n",
 				props->evt_ring_hdl, chan_hdl);
 			return -GSI_STATUS_UNSUPPORTED_OP;
@@ -2202,10 +2258,18 @@
 		GSIERR("chan %d already allocated\n", props->ch_id);
 		return -GSI_STATUS_NODEV;
 	}
-
 	memset(ctx, 0, sizeof(*ctx));
+	user_data_size = props->ring_len / props->re_size;
+	/*
+	 * GCI channels might have up to GSI_VEID_MAX out-of-order (OOO)
+	 * event completions in flight, so user_data needs to be large
+	 * enough to accommodate those.
+	 * TODO: increase user data size if GSI_VEID_MAX is not enough
+	 */
+	if (props->prot == GSI_CHAN_PROT_GCI)
+		user_data_size += GSI_VEID_MAX;
+
 	user_data = devm_kzalloc(gsi_ctx->dev,
-		(props->ring_len / props->re_size) * sizeof(void *),
+		user_data_size * sizeof(*user_data),
 		GFP_KERNEL);
 	if (user_data == NULL) {
 		GSIERR("context not allocated\n");
@@ -2256,7 +2320,9 @@
 	}
 	ctx->evtr = &gsi_ctx->evtr[erindex];
 	atomic_inc(&ctx->evtr->chan_ref_cnt);
-	if (props->prot != GSI_CHAN_PROT_GCI && ctx->evtr->props.exclusive)
+	if (props->prot != GSI_CHAN_PROT_GCI &&
+		ctx->evtr->props.exclusive &&
+		atomic_read(&ctx->evtr->chan_ref_cnt) == 1)
 		ctx->evtr->chan = ctx;
 
 	gsi_program_chan_ctx(props, gsi_ctx->per.ee, erindex);
@@ -2882,6 +2948,9 @@
 		goto reset;
 	}
 
+	if (ctx->props.cleanup_cb)
+		gsi_cleanup_xfer_user_data(chan_hdl, ctx->props.cleanup_cb);
+
 	gsi_program_chan_ctx(&ctx->props, gsi_ctx->per.ee,
 			ctx->evtr ? ctx->evtr->id : GSI_NO_EVT_ERINDEX);
 	gsi_init_chan_ring(&ctx->props, &ctx->ring);
@@ -3111,7 +3180,8 @@
 	ctx = &gsi_ctx->chan[chan_hdl];
 	ee = gsi_ctx->per.ee;
 
-	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
 		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -3167,7 +3237,37 @@
 }
 EXPORT_SYMBOL(gsi_is_channel_empty);
 
-int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx, struct gsi_xfer_elem *xfer)
+int __gsi_get_gci_cookie(struct gsi_chan_ctx *ctx, uint16_t idx)
+{
+	int i;
+	int end;
+
+	if (!ctx->user_data[idx].valid) {
+		ctx->user_data[idx].valid = true;
+		return idx;
+	}
+
+	/*
+	 * At this point we need to find an "escape buffer" for the cookie,
+	 * as the user data in this slot is still in use. This happens when
+	 * the TRE at idx has not completed yet but is being reused by a new
+	 * TRE.
+	 */
+	ctx->stats.userdata_in_use++;
+	for (i = 0; i < GSI_VEID_MAX; i++) {
+		end = ctx->ring.max_num_elem + 1;
+		if (!ctx->user_data[end + i].valid) {
+			ctx->user_data[end + i].valid = true;
+			return end + i;
+		}
+	}
+
+	/* TODO: Increase escape buffer size if we hit this */
+	GSIERR("user_data is full\n");
+	return -EPERM;
+}
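Put differently, the first max_num_elem + 1 user_data slots map one-to-one onto ring elements, and the GSI_VEID_MAX slots after them form the escape area searched above. With an assumed ring size:

	uint32_t max_num_elem = 1023;              /* assumed ring elements - 1 */
	uint32_t first_escape = max_num_elem + 1;  /* == 1024                   */
	/* escape slots: user_data[1024] .. user_data[1024 + GSI_VEID_MAX - 1]  */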
+
+int __gsi_populate_gci_tre(struct gsi_chan_ctx *ctx,
+	struct gsi_xfer_elem *xfer)
 {
 	struct gsi_gci_tre gci_tre;
 	struct gsi_gci_tre *tre_gci_ptr;
@@ -3193,11 +3293,13 @@
 	gci_tre.buffer_ptr = xfer->addr;
 	gci_tre.buf_len = xfer->len;
 	gci_tre.re_type = GSI_RE_COAL;
-	gci_tre.cookie = idx;
+	gci_tre.cookie = __gsi_get_gci_cookie(ctx, idx);
+	if (gci_tre.cookie < 0)
+		return -EPERM;
 
 	/* write the TRE to ring */
 	*tre_gci_ptr = gci_tre;
-	ctx->user_data[idx] = xfer->xfer_user_data;
+	ctx->user_data[idx].p = xfer->xfer_user_data;
 
 	return 0;
 }
@@ -3235,7 +3337,8 @@
 
 	/* write the TRE to ring */
 	*tre_ptr = tre;
-	ctx->user_data[idx] = xfer->xfer_user_data;
+	ctx->user_data[idx].valid = true;
+	ctx->user_data[idx].p = xfer->xfer_user_data;
 
 	return 0;
 }
@@ -3338,7 +3441,8 @@
 
 	ctx = &gsi_ctx->chan[chan_hdl];
 
-	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
 		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -3393,7 +3497,8 @@
 	ctx = &gsi_ctx->chan[chan_hdl];
 	ee = gsi_ctx->per.ee;
 
-	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
 		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
@@ -3455,7 +3560,8 @@
 
 	ctx = &gsi_ctx->chan[chan_hdl];
 
-	if (ctx->props.prot != GSI_CHAN_PROT_GPI) {
+	if (ctx->props.prot != GSI_CHAN_PROT_GPI &&
+		ctx->props.prot != GSI_CHAN_PROT_GCI) {
 		GSIERR("op not supported for protocol %u\n", ctx->props.prot);
 		return -GSI_STATUS_UNSUPPORTED_OP;
 	}
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index f727206..b22c40e 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -121,14 +121,26 @@
 	unsigned long invalid_tre_error;
 	unsigned long poll_ok;
 	unsigned long poll_empty;
+	unsigned long userdata_in_use;
 	struct gsi_chan_dp_stats dp;
 };
 
+/**
+ * struct gsi_user_data - user_data element pointed to by the TRE
+ * @valid: true means the element is in use and still needs to be cleaned;
+ *	false means it is free to overwrite
+ * @p: pointer to the client's per-transfer user data
+ */
+struct gsi_user_data {
+	bool valid;
+	void *p;
+};
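A minimal sketch (not driver code) of how one slot is intended to be used: it is claimed when a TRE is queued and released once that TRE's completion has been handled; deliver() and xfer_cookie are illustrative names only:

	struct gsi_user_data slot = { .valid = false, .p = NULL };

	/* on submit */
	slot.valid = true;
	slot.p = xfer_cookie;       /* caller's per-transfer context          */

	/* on completion */
	if (slot.valid) {
		deliver(slot.p);    /* hand the context back to the client   */
		slot.valid = false; /* slot may now be reused                 */
	}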
+
 struct gsi_chan_ctx {
 	struct gsi_chan_props props;
 	enum gsi_chan_state state;
 	struct gsi_ring_ctx ring;
-	void **user_data;
+	struct gsi_user_data *user_data;
 	struct gsi_evt_ctx *evtr;
 	struct mutex mlock;
 	struct completion compl;
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 1b0af66..03efc9c 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -278,6 +278,7 @@
 	if (ctx->evtr)
 		PRT_STAT("compl_evt=%lu\n",
 			ctx->evtr->stats.completed);
+	PRT_STAT("userdata_in_use=%lu\n", ctx->stats.userdata_in_use);
 
 	PRT_STAT("ch_below_lo=%lu\n", ctx->stats.dp.ch_below_lo);
 	PRT_STAT("ch_below_hi=%lu\n", ctx->stats.dp.ch_below_hi);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
index 2b94d85..892b831 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
  */
 
 
@@ -1089,11 +1089,9 @@
 	struct ipa3_dma_xfer_wrapper *xfer_descr_expected;
 	struct ipa3_sys_context *sys;
 	unsigned long flags;
-	struct ipa_mem_buffer *mem_info;
 
 	IPADMA_FUNC_ENTRY();
 
-	mem_info = (struct ipa_mem_buffer *)data;
 	ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
 	if (ep_idx < 0) {
 		IPADMA_ERR("IPA Client mapping failed\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index b2885b8..6d822c0 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -90,11 +90,12 @@
 static void ipa3_replenish_rx_work_func(struct work_struct *work);
 static void ipa3_fast_replenish_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_wq_handle_rx(struct work_struct *work);
-static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size);
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
+	struct gsi_chan_xfer_notify *notify);
 static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys,
-		struct ipa_mem_buffer *mem_info, uint32_t num);
+		struct gsi_chan_xfer_notify *notify, uint32_t num);
 static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
-				u32 size);
+				struct gsi_chan_xfer_notify *notify);
 static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		struct ipa3_sys_context *sys);
 static void ipa3_cleanup_rx(struct ipa3_sys_context *sys);
@@ -102,8 +103,7 @@
 static void ipa3_alloc_wlan_rx_common_cache(u32 size);
 static void ipa3_cleanup_wlan_rx_common_cache(void);
 static void ipa3_wq_repl_rx(struct work_struct *work);
-static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
-		struct ipa_mem_buffer *mem_info);
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys);
 static int ipa_gsi_setup_coal_def_channel(struct ipa_sys_connect_params *in,
 	struct ipa3_ep_context *ep, struct ipa3_ep_context *coal_ep);
 static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
@@ -119,9 +119,10 @@
 		struct ipa3_tx_pkt_wrapper *tx_pkt,
 		struct ipahal_imm_cmd_pyld **tag_pyld_ret);
 static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
-	struct ipa_mem_buffer *mem_info);
+	struct gsi_chan_xfer_notify *notify);
 static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
-	struct ipa_mem_buffer *mem_info, int expected_num, int *actual_num);
+	struct gsi_chan_xfer_notify *notify, int expected_num,
+	int *actual_num);
 static unsigned long tag_to_pointer_wa(uint64_t tag);
 static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt);
 
@@ -734,23 +735,23 @@
 {
 	int ret;
 	int cnt = 0;
-	struct ipa_mem_buffer mem_info = { 0 };
+	struct gsi_chan_xfer_notify notify = { 0 };
 
 	while ((in_poll_state ? atomic_read(&sys->curr_polling_state) :
 		!atomic_read(&sys->curr_polling_state))) {
 		if (cnt && !process_all)
 			break;
 
-		ret = ipa_poll_gsi_pkt(sys, &mem_info);
+		ret = ipa_poll_gsi_pkt(sys, &notify);
 		if (ret)
 			break;
 
 		if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(sys->ep->client))
-			ipa3_dma_memcpy_notify(sys, &mem_info);
+			ipa3_dma_memcpy_notify(sys);
 		else if (IPA_CLIENT_IS_WLAN_CONS(sys->ep->client))
-			ipa3_wlan_wq_rx_common(sys, mem_info.size);
+			ipa3_wlan_wq_rx_common(sys, &notify);
 		else
-			ipa3_wq_rx_common(sys, mem_info.size);
+			ipa3_wq_rx_common(sys, &notify);
 
 		++cnt;
 	}
@@ -911,7 +912,7 @@
 int ipa3_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
 {
 	struct ipa3_ep_context *ep;
-	int ipa_ep_idx;
+	int i, ipa_ep_idx;
 	int result = -EINVAL;
 	struct ipahal_reg_coal_qmap_cfg qmap_cfg;
 	struct ipahal_reg_coal_evict_lru evict_lru;
@@ -930,7 +931,7 @@
 	}
 
 	ipa_ep_idx = ipa3_get_ep_mapping(sys_in->client);
-	if (ipa_ep_idx == -1) {
+	if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
 		IPAERR("Invalid client.\n");
 		goto fail_gen;
 	}
@@ -1099,8 +1100,11 @@
 		}
 	}
 
-	if (IPA_CLIENT_IS_CONS(sys_in->client))
+	if (IPA_CLIENT_IS_CONS(sys_in->client)) {
 		ipa3_replenish_rx_cache(ep->sys);
+		for (i = 0; i < GSI_VEID_MAX; i++)
+			INIT_LIST_HEAD(&ep->sys->pending_pkts[i]);
+	}
 
 	if (IPA_CLIENT_IS_WLAN_CONS(sys_in->client)) {
 		ipa3_alloc_wlan_rx_common_cache(IPA_WLAN_COMM_RX_POOL_LOW);
@@ -1136,7 +1140,7 @@
 	IPADBG("client %d (ep: %d) connected sys=%pK\n", sys_in->client,
 			ipa_ep_idx, ep->sys);
 
-
+	/* configure the registers and setup the default pipe */
 	if (sys_in->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
 		evict_lru.coal_vp_lru_thrshld = 0;
 		evict_lru.coal_eviction_en = true;
@@ -1145,7 +1149,6 @@
 		qmap_cfg.mux_id_byte_sel = IPA_QMAP_ID_BYTE;
 		ipahal_write_reg_fields(IPA_COAL_QMAP_CFG, &qmap_cfg);
 
-		/* setup the default pipe */
 		sys_in->client = IPA_CLIENT_APPS_WAN_CONS;
 		sys_in->ipa_ep_cfg = ep_cfg_copy;
 		result = ipa_setup_coal_def_pipe(sys_in, ep);
@@ -1458,7 +1461,7 @@
 /**
  * ipa3_teardown_coal_def_pipe() - Teardown the APPS_WAN_COAL_CONS
  *				   default GPI pipe and cleanup IPA EP
- *				   called after the coalesced pipe is destroyed
+ *				   called after the coalesced pipe is destroyed.
  * @clnt_hdl:	[in] the handle obtained from ipa3_setup_sys_pipe
  *
  * Returns:	0 on success, negative on failure
@@ -1828,7 +1831,6 @@
 		if (!rx_pkt)
 			goto fail_kmem_cache_alloc;
 
-		INIT_LIST_HEAD(&rx_pkt->link);
 		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
 		rx_pkt->sys = sys;
 
@@ -1895,11 +1897,9 @@
 			if (ipa3_ctx->wc_memb.wlan_comm_free_cnt > 0)
 				ipa3_ctx->wc_memb.wlan_comm_free_cnt--;
 
-			INIT_LIST_HEAD(&rx_pkt->link);
 			rx_pkt->len = 0;
 			rx_pkt->sys = sys;
 
-			list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 			memset(&gsi_xfer_elem_one, 0,
 				sizeof(gsi_xfer_elem_one));
 			gsi_xfer_elem_one.addr = rx_pkt->data.dma_addr;
@@ -2064,7 +2064,6 @@
 		if (!rx_pkt)
 			goto fail_kmem_cache_alloc;
 
-		INIT_LIST_HEAD(&rx_pkt->link);
 		INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
 		rx_pkt->sys = sys;
 
@@ -2083,7 +2082,6 @@
 			goto fail_dma_mapping;
 		}
 
-		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
 		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
 		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
@@ -2158,7 +2156,6 @@
 			if (!rx_pkt)
 				goto fail_kmem_cache_alloc;
 
-			INIT_LIST_HEAD(&rx_pkt->link);
 			INIT_WORK(&rx_pkt->work, ipa3_wq_rx_avail);
 			rx_pkt->sys = sys;
 
@@ -2184,7 +2181,6 @@
 				struct ipa3_rx_pkt_wrapper, link);
 			list_del(&rx_pkt->link);
 			spin_unlock_bh(&sys->spinlock);
-			INIT_LIST_HEAD(&rx_pkt->link);
 			ptr = skb_put(rx_pkt->data.skb, sys->rx_buff_sz);
 			rx_pkt->data.dma_addr = dma_map_single(ipa3_ctx->pdev,
 				ptr, sys->rx_buff_sz, DMA_FROM_DEVICE);
@@ -2196,7 +2192,6 @@
 			}
 		}
 
-		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
 		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
 		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
@@ -2286,7 +2281,6 @@
 		if (curr == atomic_read(&sys->repl->tail_idx))
 			break;
 		rx_pkt = sys->repl->cache[curr];
-		list_add_tail(&rx_pkt->link, &sys->head_desc_list);
 		gsi_xfer_elem_array[idx].addr = rx_pkt->data.dma_addr;
 		gsi_xfer_elem_array[idx].len = sys->rx_buff_sz;
 		gsi_xfer_elem_array[idx].flags = GSI_XFER_FLAG_EOT;
@@ -2363,6 +2357,27 @@
 }
 
 /**
+ * free_rx_pkt() - function to free the skb and rx_pkt_wrapper
+ *
+ * @chan_user_data: ipa_sys_context used for skb size and skb_free func
+ * @xfer_uder_data: rx_pkt wrapper to be freed
+ *
+ */
+static void free_rx_pkt(void *chan_user_data, void *xfer_user_data)
+{
+	struct ipa3_rx_pkt_wrapper *rx_pkt = (struct ipa3_rx_pkt_wrapper *)
+		xfer_user_data;
+	struct ipa3_sys_context *sys = (struct ipa3_sys_context *)
+		chan_user_data;
+
+	dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+		sys->rx_buff_sz, DMA_FROM_DEVICE);
+	sys->free_skb(rx_pkt->data.skb);
+	kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
+}
+
+/**
  * ipa3_cleanup_rx() - release RX queue resources
  *
  */
@@ -2373,14 +2388,10 @@
 	u32 head;
 	u32 tail;
 
-	list_for_each_entry_safe(rx_pkt, r,
-				 &sys->head_desc_list, link) {
-		list_del(&rx_pkt->link);
-		dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
-			sys->rx_buff_sz, DMA_FROM_DEVICE);
-		sys->free_skb(rx_pkt->data.skb);
-		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
-	}
+	/*
+	 * Buffers not consumed by GSI are cleaned up using the cleanup
+	 * callback provided to GSI.
+	 */
 
 	list_for_each_entry_safe(rx_pkt, r,
 				 &sys->rcycl_list, link) {
@@ -2391,7 +2402,7 @@
 		kmem_cache_free(ipa3_ctx->rx_pkt_wrapper_cache, rx_pkt);
 	}
 
-	if (sys->repl->cache) {
+	if (sys->repl) {
 		head = atomic_read(&sys->repl->head_idx);
 		tail = atomic_read(&sys->repl->tail_idx);
 		while (head != tail) {
@@ -2426,6 +2437,7 @@
 static int ipa3_lan_rx_pyld_hdlr(struct sk_buff *skb,
 		struct ipa3_sys_context *sys)
 {
+	int rc = 0;
 	struct ipahal_pkt_status status;
 	u32 pkt_status_sz;
 	struct sk_buff *skb2;
@@ -2443,7 +2455,7 @@
 
 	if (skb->len == 0) {
 		IPAERR("ZLT packet arrived to AP\n");
-		return 0;
+		goto out;
 	}
 
 	if (sys->len_partial) {
@@ -2503,7 +2515,7 @@
 				sys->prev_skb = skb2;
 			}
 			sys->len_rem -= skb->len;
-			return 0;
+			goto out;
 		}
 	}
 
@@ -2518,7 +2530,7 @@
 			IPADBG_LOW("status straddles buffer\n");
 			sys->prev_skb = skb_copy(skb, GFP_KERNEL);
 			sys->len_partial = skb->len;
-			return 0;
+			goto out;
 		}
 
 		ipahal_pkt_status_parse(skb->data, &status);
@@ -2567,7 +2579,7 @@
 				skb_pull(skb, pkt_status_sz);
 				if (skb->len < sizeof(comp)) {
 					IPAERR("TAG arrived without packet\n");
-					return 0;
+					goto out;
 				}
 				memcpy(&comp, skb->data, sizeof(comp));
 				skb_pull(skb, sizeof(comp));
@@ -2610,7 +2622,7 @@
 				IPADBG_LOW("Ins header in next buffer\n");
 				sys->prev_skb = skb_copy(skb, GFP_KERNEL);
 				sys->len_partial = skb->len;
-				return 0;
+				goto out;
 			}
 
 			pad_len_byte = ((status.pkt_len + 3) & ~3) -
@@ -2698,7 +2710,9 @@
 		tx_pkt = NULL;
 	}
 
-	return 0;
+out:
+	ipa3_skb_recycle(skb);
+	return rc;
 }
 
 static struct sk_buff *ipa3_join_prev_skb(struct sk_buff *prev_skb,
@@ -2976,103 +2990,181 @@
 static void ipa3_recycle_rx_wrapper(struct ipa3_rx_pkt_wrapper *rx_pkt)
 {
 	rx_pkt->data.dma_addr = 0;
-	ipa3_skb_recycle(rx_pkt->data.skb);
+	/* skb recycle was moved to pyld_hdlr */
 	INIT_LIST_HEAD(&rx_pkt->link);
 	spin_lock_bh(&rx_pkt->sys->spinlock);
 	list_add_tail(&rx_pkt->link, &rx_pkt->sys->rcycl_list);
 	spin_unlock_bh(&rx_pkt->sys->spinlock);
 }
 
-static void ipa3_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
+/**
+ * handle_skb_completion() - Handle an EOB or EOT event completion and prep the skb
+ *
+ * If EOB: set the skb values, put rx_pkt at the end of the list and return NULL.
+ *
+ * If EOT: set the skb values and put the skb at the end of the list. Then update
+ * the lengths and chain the skbs together while freeing and unmapping the
+ * corresponding rx pkts. Once finished, return the head skb to be sent up the
+ * network stack.
+ */
+static struct sk_buff *handle_skb_completion(struct gsi_chan_xfer_notify
+		*notify, bool update_truesize)
 {
-	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
-	struct sk_buff *rx_skb;
+	struct ipa3_rx_pkt_wrapper *rx_pkt, *tmp;
+	struct sk_buff *rx_skb, *next_skb = NULL;
+	struct list_head *head;
+	struct ipa3_sys_context *sys;
 
-	if (unlikely(list_empty(&sys->head_desc_list))) {
-		WARN_ON(1);
-		return;
+	sys = (struct ipa3_sys_context *) notify->chan_user_data;
+	rx_pkt = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
+
+	spin_lock_bh(&rx_pkt->sys->spinlock);
+	rx_pkt->sys->len--;
+	spin_unlock_bh(&rx_pkt->sys->spinlock);
+
+	if (notify->bytes_xfered)
+		rx_pkt->len = notify->bytes_xfered;
+
+	rx_skb = rx_pkt->data.skb;
+	skb_set_tail_pointer(rx_skb, rx_pkt->len);
+	rx_skb->len = rx_pkt->len;
+
+	if (update_truesize) {
+		*(unsigned int *)rx_skb->cb = rx_skb->len;
+		rx_skb->truesize = rx_pkt->len + sizeof(struct sk_buff);
 	}
-	spin_lock_bh(&sys->spinlock);
-	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
-					   struct ipa3_rx_pkt_wrapper,
-					   link);
-	list_del(&rx_pkt_expected->link);
-	sys->len--;
-	if (size)
-		rx_pkt_expected->len = size;
-	spin_unlock_bh(&sys->spinlock);
-	rx_skb = rx_pkt_expected->data.skb;
-	dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr,
-			sys->rx_buff_sz, DMA_FROM_DEVICE);
-	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
-	rx_skb->len = rx_pkt_expected->len;
-	*(unsigned int *)rx_skb->cb = rx_skb->len;
-	rx_skb->truesize = rx_pkt_expected->len + sizeof(struct sk_buff);
-	sys->pyld_hdlr(rx_skb, sys);
-	sys->free_rx_wrapper(rx_pkt_expected);
-	sys->repl_hdlr(sys);
+
+	if (notify->veid >= GSI_VEID_MAX) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	head = &rx_pkt->sys->pending_pkts[notify->veid];
+
+	INIT_LIST_HEAD(&rx_pkt->link);
+	list_add_tail(&rx_pkt->link, head);
+
+	if (notify->evt_id == GSI_CHAN_EVT_EOT) {
+	/* go over the list backward to save computations on updating length */
+		list_for_each_entry_safe_reverse(rx_pkt, tmp, head, link) {
+			rx_skb = rx_pkt->data.skb;
+
+			list_del(&rx_pkt->link);
+			dma_unmap_single(ipa3_ctx->pdev, rx_pkt->data.dma_addr,
+				sys->rx_buff_sz, DMA_FROM_DEVICE);
+			sys->free_rx_wrapper(rx_pkt);
+
+			if (next_skb) {
+				skb_shinfo(rx_skb)->frag_list = next_skb;
+				rx_skb->len += next_skb->len;
+				rx_skb->data_len += next_skb->len;
+			}
+			next_skb = rx_skb;
+		}
+	} else {
+		return NULL;
+	}
+	return rx_skb;
+}
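A worked example of the EOT length bookkeeping above, assuming three buffers of 1500, 1500 and 700 bytes were filled for the same VEID: walking the pending list backwards and chaining via frag_list leaves the head skb advertising all of the data.

	/* after the reverse walk (numbers are the assumed buffer fills)       */
	head_skb->len      = 1500 + (1500 + 700);  /* own + chained == 3700    */
	head_skb->data_len =        (1500 + 700);  /* bytes in frag_list chain */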
+
+static void ipa3_wq_rx_common(struct ipa3_sys_context *sys,
+	struct gsi_chan_xfer_notify *notify)
+{
+	struct sk_buff *rx_skb;
+	struct ipa3_sys_context *coal_sys;
+	int ipa_ep_idx;
+
+	rx_skb = handle_skb_completion(notify, true);
+
+	if (rx_skb) {
+		sys->pyld_hdlr(rx_skb, sys);
+
+		/* For coalescing, we have 2 transfer rings to replenish */
+		if (sys->ep->client == IPA_CLIENT_APPS_WAN_COAL_CONS) {
+			ipa_ep_idx = ipa3_get_ep_mapping(
+					IPA_CLIENT_APPS_WAN_CONS);
+
+			if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+				IPAERR("Invalid client.\n");
+				return;
+			}
+
+			coal_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
+			coal_sys->repl_hdlr(coal_sys);
+		}
+
+		sys->repl_hdlr(sys);
+	}
 }
 
 static void ipa3_wq_rx_napi_chain(struct ipa3_sys_context *sys,
-		struct ipa_mem_buffer *mem_info, uint32_t num)
+		struct gsi_chan_xfer_notify *notify, uint32_t num)
 {
-	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
+	struct ipa3_sys_context *wan_def_sys;
+	int i, ipa_ep_idx;
 	struct sk_buff *rx_skb, *first_skb = NULL, *prev_skb = NULL;
-	int i;
 
-	if (unlikely(list_empty(&sys->head_desc_list)) || !mem_info || !num) {
-		WARN_ON(1);
-		return;
+	/* non-coalescing case (SKB chaining enabled) */
+	if (sys->ep->client != IPA_CLIENT_APPS_WAN_COAL_CONS) {
+		for (i = 0; i < num; i++) {
+			rx_skb = handle_skb_completion(&notify[i], false);
+
+			/* this is always true for EOTs */
+			if (rx_skb) {
+				if (!first_skb)
+					first_skb = rx_skb;
+
+				if (prev_skb)
+					skb_shinfo(prev_skb)->frag_list =
+						rx_skb;
+
+				prev_skb = rx_skb;
+			}
+		}
+		if (prev_skb) {
+			skb_shinfo(prev_skb)->frag_list = NULL;
+			sys->pyld_hdlr(first_skb, sys);
+			sys->repl_hdlr(sys);
+		}
+
+	/* TODO: add chaining for coal case */
+	} else {
+		for (i = 0; i < num; i++) {
+			rx_skb = handle_skb_completion(&notify[i], false);
+
+			if (rx_skb) {
+				sys->pyld_hdlr(rx_skb, sys);
+
+			/*
+			 * For coalescing, we have 2 transfer rings to replenish
+			 */
+				ipa_ep_idx = ipa3_get_ep_mapping(
+						IPA_CLIENT_APPS_WAN_CONS);
+
+				if (ipa_ep_idx == IPA_EP_NOT_ALLOCATED) {
+					IPAERR("Invalid client.\n");
+					return;
+				}
+				wan_def_sys = ipa3_ctx->ep[ipa_ep_idx].sys;
+				wan_def_sys->repl_hdlr(wan_def_sys);
+				sys->repl_hdlr(sys);
+			}
+		}
 	}
-
-	for (i = 0; i < num; i++) {
-		spin_lock_bh(&sys->spinlock);
-		rx_pkt_expected = list_first_entry(&sys->head_desc_list,
-						   struct ipa3_rx_pkt_wrapper,
-						   link);
-		list_del(&rx_pkt_expected->link);
-		sys->len--;
-		if (mem_info[i].size)
-			rx_pkt_expected->len = mem_info[i].size;
-		spin_unlock_bh(&sys->spinlock);
-		rx_skb = rx_pkt_expected->data.skb;
-		dma_unmap_single(ipa3_ctx->pdev, rx_pkt_expected->data.dma_addr,
-				sys->rx_buff_sz, DMA_FROM_DEVICE);
-		skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
-		rx_skb->len = rx_pkt_expected->len;
-
-		if (!first_skb)
-			first_skb = rx_skb;
-
-		if (prev_skb)
-			skb_shinfo(prev_skb)->frag_list = rx_skb;
-
-		prev_skb = rx_skb;
-		sys->free_rx_wrapper(rx_pkt_expected);
-	}
-
-	skb_shinfo(prev_skb)->frag_list = NULL;
-	sys->pyld_hdlr(first_skb, sys);
-	sys->repl_hdlr(sys);
 }
 
-static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys, u32 size)
+static void ipa3_wlan_wq_rx_common(struct ipa3_sys_context *sys,
+	struct gsi_chan_xfer_notify *notify)
 {
 	struct ipa3_rx_pkt_wrapper *rx_pkt_expected;
 	struct sk_buff *rx_skb;
 
-	if (unlikely(list_empty(&sys->head_desc_list))) {
-		WARN_ON(1);
-		return;
-	}
-	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
-					   struct ipa3_rx_pkt_wrapper,
-					   link);
-	list_del(&rx_pkt_expected->link);
+	rx_pkt_expected = (struct ipa3_rx_pkt_wrapper *) notify->xfer_user_data;
+
 	sys->len--;
 
-	if (size)
-		rx_pkt_expected->len = size;
+	if (notify->bytes_xfered)
+		rx_pkt_expected->len = notify->bytes_xfered;
 
 	rx_skb = rx_pkt_expected->data.skb;
 	skb_set_tail_pointer(rx_skb, rx_pkt_expected->len);
@@ -3090,8 +3182,7 @@
 	ipa3_replenish_wlan_rx_cache(sys);
 }
 
-static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys,
-	struct ipa_mem_buffer *mem_info)
+static void ipa3_dma_memcpy_notify(struct ipa3_sys_context *sys)
 {
 	IPADBG_LOW("ENTER.\n");
 	if (unlikely(list_empty(&sys->head_desc_list))) {
@@ -3099,8 +3190,7 @@
 		WARN_ON(1);
 		return;
 	}
-	sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE,
-				(unsigned long)(mem_info));
+	sys->ep->client_notify(sys->ep->priv, IPA_RECEIVE, 0);
 	IPADBG_LOW("EXIT\n");
 }
 
@@ -3837,7 +3927,6 @@
 static void ipa_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
 {
 	struct ipa3_sys_context *sys;
-	struct ipa3_rx_pkt_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
 
 	if (!notify) {
 		IPAERR("gsi notify is NULL.\n");
@@ -3846,18 +3935,10 @@
 	IPADBG_LOW("event %d notified\n", notify->evt_id);
 
 	sys = (struct ipa3_sys_context *)notify->chan_user_data;
-	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
-					   struct ipa3_rx_pkt_wrapper, link);
-	rx_pkt_rcvd = (struct ipa3_rx_pkt_wrapper *)notify->xfer_user_data;
 
-	if (rx_pkt_expected != rx_pkt_rcvd) {
-		IPAERR("Pkt was not filled in head of rx buffer.\n");
-		WARN_ON(1);
-		return;
-	}
-	sys->ep->bytes_xfered_valid = true;
-	sys->ep->bytes_xfered = notify->bytes_xfered;
-	sys->ep->phys_base = rx_pkt_rcvd->data.dma_addr;
+
+	sys->ep->xfer_notify_valid = true;
+	sys->ep->xfer_notify = *notify;
 
 	switch (notify->evt_id) {
 	case GSI_CHAN_EVT_EOT:
@@ -3878,7 +3959,6 @@
 static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
 {
 	struct ipa3_sys_context *sys;
-	struct ipa3_dma_xfer_wrapper *rx_pkt_expected, *rx_pkt_rcvd;
 
 	if (!notify) {
 		IPAERR("gsi notify is NULL.\n");
@@ -3891,19 +3971,9 @@
 		IPAERR("IRQ_RX Callback was called for DMA_SYNC_CONS.\n");
 		return;
 	}
-	rx_pkt_expected = list_first_entry(&sys->head_desc_list,
-	struct ipa3_dma_xfer_wrapper, link);
-		rx_pkt_rcvd = (struct ipa3_dma_xfer_wrapper *)notify
-			->xfer_user_data;
-	if (rx_pkt_expected != rx_pkt_rcvd) {
-		IPAERR("Pkt was not filled in head of rx buffer.\n");
-		WARN_ON(1);
-		return;
-	}
 
-	sys->ep->bytes_xfered_valid = true;
-	sys->ep->bytes_xfered = notify->bytes_xfered;
-	sys->ep->phys_base = rx_pkt_rcvd->phys_addr_dest;
+	sys->ep->xfer_notify_valid = true;
+	sys->ep->xfer_notify = *notify;
 
 	switch (notify->evt_id) {
 	case GSI_CHAN_EVT_EOT:
@@ -4158,6 +4228,9 @@
 	if (IPA_CLIENT_IS_MEMCPY_DMA_CONS(ep->client))
 		gsi_channel_props.xfer_cb = ipa_dma_gsi_irq_rx_notify_cb;
 
+	if (IPA_CLIENT_IS_CONS(ep->client))
+		gsi_channel_props.cleanup_cb = free_rx_pkt;
+
 	result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
 		&ep->gsi_chan_hdl);
 	if (result != GSI_STATUS_SUCCESS) {
@@ -4270,23 +4343,20 @@
 }
 
 static int ipa_poll_gsi_pkt(struct ipa3_sys_context *sys,
-		struct ipa_mem_buffer *mem_info)
+		struct gsi_chan_xfer_notify *notify)
 {
 	int unused_var;
 
-	return ipa_poll_gsi_n_pkt(sys, mem_info, 1, &unused_var);
+	return ipa_poll_gsi_n_pkt(sys, notify, 1, &unused_var);
 }
 
 
 static int ipa_poll_gsi_n_pkt(struct ipa3_sys_context *sys,
-		struct ipa_mem_buffer *mem_info,
+		struct gsi_chan_xfer_notify *notify,
 		int expected_num, int *actual_num)
 {
 	int ret;
 	int idx = 0;
-	int i;
-	struct gsi_chan_xfer_notify xfer_notify[IPA_WAN_NAPI_MAX_FRAMES];
-	struct ipa3_rx_pkt_wrapper *rx_pkt;
 	int poll_num = 0;
 
 	if (!actual_num || expected_num <= 0 ||
@@ -4296,10 +4366,9 @@
 		return GSI_STATUS_INVALID_PARAMS;
 	}
 
-	if (sys->ep->bytes_xfered_valid) {
-		mem_info[idx].phys_base = sys->ep->phys_base;
-		mem_info[idx].size = (u32)sys->ep->bytes_xfered;
-		sys->ep->bytes_xfered_valid = false;
+	if (sys->ep->xfer_notify_valid) {
+		*notify = sys->ep->xfer_notify;
+		sys->ep->xfer_notify_valid = false;
 		idx++;
 	}
 	if (expected_num == idx) {
@@ -4308,7 +4377,7 @@
 	}
 
 	ret = gsi_poll_n_channel(sys->ep->gsi_chan_hdl,
-		xfer_notify, expected_num - idx, &poll_num);
+		&notify[idx], expected_num - idx, &poll_num);
 	if (ret == GSI_STATUS_POLL_EMPTY) {
 		if (idx) {
 			*actual_num = idx;
@@ -4326,12 +4395,6 @@
 		return ret;
 	}
 
-	for (i = 0; i < poll_num; i++) {
-		rx_pkt = (struct ipa3_rx_pkt_wrapper *)
-			xfer_notify[i].xfer_user_data;
-		mem_info[i+idx].phys_base = rx_pkt->data.dma_addr;
-		mem_info[i+idx].size = xfer_notify[i].bytes_xfered;
-	}
 	*actual_num = idx + poll_num;
 	return ret;
 }
@@ -4351,9 +4414,9 @@
 	int ret;
 	int cnt = 0;
 	int num = 0;
-	struct ipa_mem_buffer mem_info[IPA_WAN_NAPI_MAX_FRAMES];
 	int remain_aggr_weight;
 	struct ipa_active_client_logging_info log;
+	struct gsi_chan_xfer_notify notify[IPA_WAN_NAPI_MAX_FRAMES];
 
 	IPA_ACTIVE_CLIENTS_PREP_SPECIAL(log, "NAPI");
 
@@ -4378,17 +4441,17 @@
 			atomic_read(&ep->sys->curr_polling_state)) {
 		atomic_set(&ipa3_ctx->transport_pm.eot_activity, 1);
 		if (ipa3_ctx->enable_napi_chain) {
-			ret = ipa_poll_gsi_n_pkt(ep->sys, mem_info,
+			ret = ipa_poll_gsi_n_pkt(ep->sys, notify,
 				remain_aggr_weight, &num);
 		} else {
-			ret = ipa_poll_gsi_n_pkt(ep->sys, mem_info,
+			ret = ipa_poll_gsi_n_pkt(ep->sys, notify,
 				1, &num);
 		}
 		if (ret)
 			break;
 
 		trace_ipa3_rx_poll_num(num);
-		ipa3_wq_rx_napi_chain(ep->sys, mem_info, num);
+		ipa3_wq_rx_napi_chain(ep->sys, notify, num);
 		remain_aggr_weight -= num;
 
 		trace_ipa3_rx_poll_cnt(ep->sys->len);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index c1c49d0..154875b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -770,9 +770,8 @@
 	unsigned long gsi_evt_ring_hdl;
 	struct ipa_gsi_ep_mem_info gsi_mem_info;
 	union __packed gsi_channel_scratch chan_scratch;
-	bool bytes_xfered_valid;
-	u16 bytes_xfered;
-	dma_addr_t phys_base;
+	struct gsi_chan_xfer_notify xfer_notify;
+	bool xfer_notify_valid;
 	struct ipa_ep_cfg cfg;
 	struct ipa_ep_cfg_holb holb;
 	struct ipahal_reg_ep_cfg_status status;
@@ -880,6 +879,7 @@
 	struct ipa3_repl_ctx *repl;
 	u32 pkt_sent;
 	struct napi_struct *napi_obj;
+	struct list_head pending_pkts[GSI_VEID_MAX];
 
 	/* ordering is important - mutable fields go above */
 	struct ipa3_ep_context *ep;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
index 639c2aa..69424c7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_mhi.c
@@ -527,9 +527,10 @@
 	struct Ipa3HwEventInfoData_t *evt_info_ptr;
 	u32 size;
 
-	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_MHI)) == 0) {
+	if ((uc_event_top_mmio->protocolMask & (1 << IPA_HW_FEATURE_MHI))
+		== 0) {
 		IPAERR("MHI feature missing 0x%x\n",
-			uc_event_top_mmio->featureMask);
+			uc_event_top_mmio->protocolMask);
 		return;
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index 6670ee9..3a645d4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -8,44 +8,29 @@
 #define IPA_UC_NTN_DB_PA_TX 0x79620DC
 #define IPA_UC_NTN_DB_PA_RX 0x79620D8
 
-static void ipa3_uc_ntn_event_handler(struct IpaHwSharedMemCommonMapping_t
-				     *uc_sram_mmio)
-
-{
-	union Ipa3HwNTNErrorEventData_t ntn_evt;
-
-	if (uc_sram_mmio->eventOp ==
-		IPA_HW_2_CPU_EVENT_NTN_ERROR) {
-		ntn_evt.raw32b = uc_sram_mmio->eventParams;
-		IPADBG("uC NTN evt errType=%u pipe=%d cherrType=%u\n",
-			   ntn_evt.params.ntn_error_type,
-			   ntn_evt.params.ipa_pipe_number,
-			   ntn_evt.params.ntn_ch_err_type);
-	}
-}
-
 static void ipa3_uc_ntn_event_log_info_handler(
 struct IpaHwEventLogInfoData_t *uc_event_top_mmio)
 {
 	struct Ipa3HwEventInfoData_t *statsPtr = &uc_event_top_mmio->statsInfo;
 
-	if ((uc_event_top_mmio->featureMask & (1 << IPA_HW_FEATURE_NTN)) == 0) {
-		IPAERR("NTN feature missing 0x%x\n",
-			uc_event_top_mmio->featureMask);
+	if ((uc_event_top_mmio->protocolMask &
+		(1 << IPA_HW_PROTOCOL_ETH)) == 0) {
+		IPAERR("NTN protocol missing 0x%x\n",
+			uc_event_top_mmio->protocolMask);
 		return;
 	}
 
-	if (statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.size !=
+	if (statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.size !=
 		sizeof(struct Ipa3HwStatsNTNInfoData_t)) {
 		IPAERR("NTN stats sz invalid exp=%zu is=%u\n",
 			sizeof(struct Ipa3HwStatsNTNInfoData_t),
-			statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.size);
+			statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.size);
 		return;
 	}
 
 	ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst =
 		uc_event_top_mmio->statsInfo.baseAddrOffset +
-		statsPtr->featureInfo[IPA_HW_FEATURE_NTN].params.offset;
+		statsPtr->featureInfo[IPA_HW_PROTOCOL_ETH].params.offset;
 	IPAERR("NTN stats ofst=0x%x\n", ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst);
 	if (ipa3_ctx->uc_ntn_ctx.ntn_uc_stats_ofst +
 		sizeof(struct Ipa3HwStatsNTNInfoData_t) >=
@@ -179,7 +164,6 @@
 {
 	struct ipa3_uc_hdlrs uc_ntn_cbs = { 0 };
 
-	uc_ntn_cbs.ipa_uc_event_hdlr = ipa3_uc_ntn_event_handler;
 	uc_ntn_cbs.ipa_uc_event_log_info_hdlr =
 		ipa3_uc_ntn_event_log_info_handler;
 	uc_ntn_cbs.ipa_uc_loaded_hdlr =
@@ -237,11 +221,11 @@
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
 		cmd_data_v4_0 = (struct IpaHwOffloadSetUpCmdData_t_v4_0 *)
 			cmd.base;
-		cmd_data_v4_0->protocol = IPA_HW_FEATURE_NTN;
+		cmd_data_v4_0->protocol = IPA_HW_PROTOCOL_ETH;
 		Ntn_params = &cmd_data_v4_0->SetupCh_params.NtnSetupCh_params;
 	} else {
 		cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
-		cmd_data->protocol = IPA_HW_FEATURE_NTN;
+		cmd_data->protocol = IPA_HW_PROTOCOL_ETH;
 		Ntn_params = &cmd_data->SetupCh_params.NtnSetupCh_params;
 	}
 
@@ -569,11 +553,11 @@
 	if (ipa3_ctx->ipa_hw_type >= IPA_HW_v4_0) {
 		cmd_data_v4_0 = (struct IpaHwOffloadCommonChCmdData_t_v4_0 *)
 			cmd.base;
-		cmd_data_v4_0->protocol = IPA_HW_FEATURE_NTN;
+		cmd_data_v4_0->protocol = IPA_HW_PROTOCOL_ETH;
 		tear = &cmd_data_v4_0->CommonCh_params.NtnCommonCh_params;
 	} else {
 		cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
-		cmd_data->protocol = IPA_HW_FEATURE_NTN;
+		cmd_data->protocol = IPA_HW_PROTOCOL_ETH;
 		tear = &cmd_data->CommonCh_params.NtnCommonCh_params;
 	}
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 7f32952..ea54936 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -19,9 +19,6 @@
 #define IPA_NTN_TX_DIR 1
 #define IPA_NTN_RX_DIR 2
 
-#define IPA_WDI3_TX_DIR 1
-#define IPA_WDI3_RX_DIR 2
-
 /**
  *  @brief   Enum value determined based on the feature it
  *           corresponds to
@@ -45,8 +42,9 @@
  * @IPA_HW_FEATURE_POWER_COLLAPSE: Feature related to IPA Power collapse
  * @IPA_HW_FEATURE_WDI : Feature related to WDI operation in IPA HW
  * @IPA_HW_FEATURE_NTN : Feature related to NTN operation in IPA HW
- * @IPA_HW_FEATURE_OFFLOAD : Feature related to NTN operation in IPA HW
- * @IPA_HW_FEATURE_WDI3 : Feature related to WDI operation in IPA HW
+ * @IPA_HW_FEATURE_OFFLOAD : Feature covering the operation of several
+ *				protocols in IPA HW; use the protocol field to
+ *				determine which one (e.g. IPA_HW_PROTOCOL_11ad).
  */
 enum ipa3_hw_features {
 	IPA_HW_FEATURE_COMMON		=	0x0,
@@ -56,11 +54,28 @@
 	IPA_HW_FEATURE_ZIP		=	0x4,
 	IPA_HW_FEATURE_NTN		=	0x5,
 	IPA_HW_FEATURE_OFFLOAD		=	0x6,
-	IPA_HW_FEATURE_WDI3		=	0x7,
 	IPA_HW_FEATURE_MAX		=	IPA_HW_NUM_FEATURES
 };
 
 /**
+* enum ipa4_hw_protocol - Values that represent the protocols supported
+* in IPA HW when using the IPA_HW_FEATURE_OFFLOAD feature.
+* @IPA_HW_PROTOCOL_COMMON : protocol related to common operation of IPA HW
+* @IPA_HW_PROTOCOL_AQC : protocol related to AQC operation in IPA HW
+* @IPA_HW_PROTOCOL_11ad: protocol related to 11ad operation in IPA HW
+* @IPA_HW_PROTOCOL_WDI : protocol related to WDI operation in IPA HW
+* @IPA_HW_PROTOCOL_ETH : protocol related to ETH operation in IPA HW
+*/
+enum ipa4_hw_protocol {
+	IPA_HW_PROTOCOL_COMMON = 0x0,
+	IPA_HW_PROTOCOL_AQC = 0x1,
+	IPA_HW_PROTOCOL_11ad = 0x2,
+	IPA_HW_PROTOCOL_WDI = 0x3,
+	IPA_HW_PROTOCOL_ETH = 0x5,
+	IPA_HW_PROTOCOL_MAX
+};
+
+/**
  * enum ipa3_hw_2_cpu_events - Values that represent HW event to be sent to CPU.
  * @IPA_HW_2_CPU_EVENT_NO_OP : No event present
  * @IPA_HW_2_CPU_EVENT_ERROR : Event specify a system error is detected by the
@@ -211,7 +226,7 @@
  * struct IpaHwEventLogInfoData_t - Structure holding the parameters for
  * IPA_HW_2_CPU_EVENT_LOG_INFO Event
  *
- * @featureMask : Mask indicating the features enabled in HW.
+ * @protocolMask : Mask indicating the protocols enabled in HW.
  * Refer IPA_HW_FEATURE_MASK
  * @circBuffBaseAddrOffset : Base Address Offset of the Circular Event
  * Log Buffer structure
@@ -223,7 +238,7 @@
  * Event
  */
 struct IpaHwEventLogInfoData_t {
-	u32 featureMask;
+	u32 protocolMask;
 	u32 circBuffBaseAddrOffset;
 	struct Ipa3HwEventInfoData_t statsInfo;
 	struct Ipa3HwEventInfoData_t configInfo;
@@ -245,29 +260,6 @@
 };
 
 /**
- * enum ipa3_hw_2_cpu_ntn_events - Values that represent HW event
- *			to be sent to CPU
- * @IPA_HW_2_CPU_EVENT_NTN_ERROR : Event to specify that HW
- *			detected an error in NTN
- *
- */
-enum ipa3_hw_2_cpu_ntn_events {
-	IPA_HW_2_CPU_EVENT_NTN_ERROR =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_NTN, 0),
-};
-
-
-/**
- * enum ipa3_hw_ntn_errors - NTN specific error types.
- * @IPA_HW_NTN_ERROR_NONE : No error persists
- * @IPA_HW_NTN_CHANNEL_ERROR : Error is specific to channel
- */
-enum ipa3_hw_ntn_errors {
-	IPA_HW_NTN_ERROR_NONE    = 0,
-	IPA_HW_NTN_CHANNEL_ERROR = 1
-};
-
-/**
  * enum ipa3_hw_ntn_channel_states - Values that represent NTN
  * channel state machine.
  * @IPA_HW_NTN_CHANNEL_STATE_INITED_DISABLED : Channel is
@@ -346,33 +338,6 @@
 
 } __packed;
 
-struct IpaHwWdi3SetUpCmdData_t {
-	u32  transfer_ring_base_pa;
-	u32  transfer_ring_base_pa_hi;
-
-	u32  transfer_ring_size;
-
-	u32  transfer_ring_doorbell_pa;
-	u32  transfer_ring_doorbell_pa_hi;
-
-	u32  event_ring_base_pa;
-	u32  event_ring_base_pa_hi;
-
-	u32  event_ring_size;
-
-	u32  event_ring_doorbell_pa;
-	u32  event_ring_doorbell_pa_hi;
-
-	u16  num_pkt_buffers;
-	u8   ipa_pipe_number;
-	u8   dir;
-
-	u16  pkt_offset;
-	u16  reserved0;
-
-	u32  desc_format_template[IPA_HW_WDI3_MAX_ER_DESC_SIZE];
-} __packed;
-
 /**
  * struct Ipa3HwNtnCommonChCmdData_t - Structure holding the
  * parameters for Ntn Tear down command data params
@@ -387,35 +352,6 @@
 	uint32_t raw32b;
 } __packed;
 
-union IpaHwWdi3CommonChCmdData_t {
-	struct IpaHwWdi3CommonChCmdParams_t {
-		u32  ipa_pipe_number :8;
-		u32  reserved        :24;
-	} __packed params;
-	u32 raw32b;
-} __packed;
-
-/**
- * struct Ipa3HwNTNErrorEventData_t - Structure holding the
- * IPA_HW_2_CPU_EVENT_NTN_ERROR event. The parameters are passed
- * as immediate params in the shared memory
- *
- *@ntn_error_type: type of NTN error (ipa3_hw_ntn_errors)
- *@ipa_pipe_number: IPA pipe number on which error has happened
- *   Applicable only if error type indicates channel error
- *@ntn_ch_err_type: Information about the channel error (if
- *		available)
- */
-union Ipa3HwNTNErrorEventData_t {
-	struct IpaHwNTNErrorEventParams_t {
-		u32  ntn_error_type  :8;
-		u32  reserved        :8;
-		u32  ipa_pipe_number :8;
-		u32  ntn_ch_err_type :8;
-	} __packed params;
-	uint32_t raw32b;
-} __packed;
-
 /**
  * struct NTN3RxInfoData_t - NTN Structure holding the Rx pipe
  * information
@@ -486,28 +422,18 @@
  *				Offload protocol's Tx/Rx Path
  * @IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN : Command to tear down
  *				Offload protocol's Tx/ Rx Path
- * @IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE : Command to enable
- *				Offload protocol's Tx/Rx Path
- * @IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE : Command to disable
- *				Offload protocol's Tx/ Rx Path
- * @IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND : Command to suspend
- *				Offload protocol's Tx/Rx Path
- * @IPA_CPU_2_HW_CMD_OFFLOAD_RESUME : Command to resume
- *				Offload protocol's Tx/ Rx Path
+ * @IPA_CPU_2_HW_CMD_PERIPHERAL_INIT : Command to initialize peripheral
+ * @IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT : Command to deinitialize peripheral
  */
 enum ipa_cpu_2_hw_offload_commands {
 	IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP  =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 1),
 	IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 2),
-	IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE  =
+	IPA_CPU_2_HW_CMD_PERIPHERAL_INIT =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 3),
-	IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE =
+	IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT =
 		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 4),
-	IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND  =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 5),
-	IPA_CPU_2_HW_CMD_OFFLOAD_RESUME =
-		FEATURE_ENUM_VAL(IPA_HW_FEATURE_OFFLOAD, 6),
 };
 
 
@@ -571,30 +497,70 @@
 };
 
 /**
- * struct IpaHwSetUpCmd  -
+ * struct IpaHw11adSetupCmdData_t  - 11ad setup channel command data
+ * @dir: Direction RX/TX
+ * @wifi_ch: 11ad peripheral pipe number
+ * @gsi_ch: GSI Channel number
+ * @reserved: 1 byte of padding
+ * @wifi_hp_addr_lsb: Head/Tail pointer absolute address (lower 32 bits)
+ * @wifi_hp_addr_msb: Head/Tail pointer absolute address (upper 32 bits)
+ */
+struct IpaHw11adSetupCmdData_t {
+	u8 dir;
+	u8 wifi_ch;
+	u8 gsi_ch;
+	u8 reserved;
+	u32 wifi_hp_addr_lsb;
+	u32 wifi_hp_addr_msb;
+} __packed;
+
+
+/**
+ * struct IpaHw11adCommonChCmdData_t - 11ad tear down channel command data
+ * @gsi_ch: GSI Channel number
+ * @reserved_0: padding
+ * @reserved_1: padding
+ */
+struct IpaHw11adCommonChCmdData_t {
+	u8 gsi_ch;
+	u8 reserved_0;
+	u16 reserved_1;
+} __packed;
+
+/**
+ * struct IpaHw11adInitCmdData_t - 11ad peripheral init command data
+ * @periph_baddr_lsb: Peripheral Base Address LSB (pa/IOVA)
+ * @periph_baddr_msb: Peripheral Base Address MSB (pa/IOVA)
+ */
+struct IpaHw11adInitCmdData_t {
+	u32 periph_baddr_lsb;
+	u32 periph_baddr_msb;
+} __packed;
+
+/**
+ * struct IpaHw11adDeinitCmdData_t - 11ad peripheral deinit command data
+ * @reserved: Reserved for future
+ */
+struct IpaHw11adDeinitCmdData_t {
+	u32 reserved;
+};
+
+/**
+ * union IpaHwSetUpCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP
  *
  *
  */
 union IpaHwSetUpCmd {
 	struct Ipa3HwNtnSetUpCmdData_t NtnSetupCh_params;
-	struct IpaHwWdi3SetUpCmdData_t Wdi3SetupCh_params;
+	struct IpaHw11adSetupCmdData_t	W11AdSetupCh_params;
 } __packed;
 
-/**
- * struct IpaHwOffloadSetUpCmdData_t  -
- *
- *
- */
 struct IpaHwOffloadSetUpCmdData_t {
 	u8 protocol;
 	union IpaHwSetUpCmd SetupCh_params;
 } __packed;
 
-/**
- * struct IpaHwOffloadSetUpCmdData_t_v4_0  -
- *
- *
- */
 struct IpaHwOffloadSetUpCmdData_t_v4_0 {
 	u32 protocol;
 	union IpaHwSetUpCmd SetupCh_params;
@@ -608,7 +574,7 @@
  */
 union IpaHwCommonChCmd {
 	union Ipa3HwNtnCommonChCmdData_t NtnCommonCh_params;
-	union IpaHwWdi3CommonChCmdData_t Wdi3CommonCh_params;
+	struct IpaHw11adCommonChCmdData_t W11AdCommonCh_params;
 } __packed;
 
 struct IpaHwOffloadCommonChCmdData_t {
@@ -622,4 +588,33 @@
 } __packed;
 
 
+/**
+ * union IpaHwPeripheralInitCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_PERIPHERAL_INIT
+ *
+ */
+union IpaHwPeripheralInitCmd {
+	struct IpaHw11adInitCmdData_t W11AdInit_params;
+} __packed;
+
+struct IpaHwPeripheralInitCmdData_t {
+	u32 protocol;
+	union IpaHwPeripheralInitCmd Init_params;
+} __packed;
+
+/**
+ * union IpaHwPeripheralDeinitCmd - Structure holding the parameters
+ * for IPA_CPU_2_HW_CMD_PERIPHERAL_DEINIT
+ *
+ */
+union IpaHwPeripheralDeinitCmd {
+	struct IpaHw11adDeinitCmdData_t W11AdDeinit_params;
+} __packed;
+
+struct IpaHwPeripheralDeinitCmdData_t {
+	u32 protocol;
+	union IpaHwPeripheralDeinitCmd PeripheralDeinit_params;
+} __packed;
+
 #endif /* _IPA_UC_OFFLOAD_I_H_ */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
index b52e79b..1a3dfba 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_wdi.c
@@ -347,24 +347,25 @@
 {
 	struct Ipa3HwEventInfoData_t *stats_ptr = &uc_event_top_mmio->statsInfo;
 
-	if ((uc_event_top_mmio->featureMask &
-		(1 << IPA_HW_FEATURE_WDI)) == 0) {
-		IPAERR("WDI feature missing 0x%x\n",
-			uc_event_top_mmio->featureMask);
+	if ((uc_event_top_mmio->protocolMask &
+		(1 << IPA_HW_PROTOCOL_WDI)) == 0) {
+		IPAERR("WDI protocol missing 0x%x\n",
+			uc_event_top_mmio->protocolMask);
 		return;
 	}
 
-	if (stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.size !=
+	if (stats_ptr->featureInfo[IPA_HW_PROTOCOL_WDI].params.size !=
 		sizeof(struct IpaHwStatsWDIInfoData_t)) {
 		IPAERR("wdi stats sz invalid exp=%zu is=%u\n",
 			sizeof(struct IpaHwStatsWDIInfoData_t),
-			stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.size);
+			stats_ptr->featureInfo[
+				IPA_HW_PROTOCOL_WDI].params.size);
 		return;
 	}
 
 	ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst =
 		stats_ptr->baseAddrOffset +
-		stats_ptr->featureInfo[IPA_HW_FEATURE_WDI].params.offset;
+		stats_ptr->featureInfo[IPA_HW_PROTOCOL_WDI].params.offset;
 	IPAERR("WDI stats ofst=0x%x\n", ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst);
 	if (ipa3_ctx->uc_wdi_ctx.wdi_uc_stats_ofst +
 		sizeof(struct IpaHwStatsWDIInfoData_t) >=
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
index 1f639ce..151be9d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_wdi3_i.c
@@ -6,555 +6,36 @@
 #include "ipa_i.h"
 #include <linux/ipa_wdi3.h>
 
-#define IPA_HW_WDI3_RX_MBOX_START_INDEX 48
-#define IPA_HW_WDI3_TX_MBOX_START_INDEX 50
-
-static int ipa3_send_wdi3_setup_pipe_cmd(
-	u8 is_smmu_enabled, struct ipa_wdi_pipe_setup_info *info,
-	struct ipa_wdi_pipe_setup_info_smmu *info_smmu, u8 dir)
-{
-	int ipa_ep_idx;
-	int result = 0, len;
-	unsigned long va;
-	struct ipa_mem_buffer cmd;
-	struct IpaHwWdi3SetUpCmdData_t *wdi3_params;
-	struct IpaHwOffloadSetUpCmdData_t *cmd_data;
-
-	if (info == NULL || info_smmu == NULL) {
-		IPAERR("invalid input\n");
-		return -EINVAL;
-	}
-
-	cmd.size = sizeof(*cmd_data);
-	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
-			&cmd.phys_base, GFP_KERNEL);
-	if (cmd.base == NULL) {
-		IPAERR("fail to get DMA memory.\n");
-		return -ENOMEM;
-	}
-
-	cmd_data = (struct IpaHwOffloadSetUpCmdData_t *)cmd.base;
-	cmd_data->protocol = IPA_HW_FEATURE_WDI3;
-
-	if (!is_smmu_enabled) {
-		ipa_ep_idx = ipa_get_ep_mapping(info->client);
-		if (ipa_ep_idx == -1) {
-			IPAERR("fail to get ep idx.\n");
-			return -EFAULT;
-		}
-
-		IPADBG("client=%d ep=%d\n", info->client, ipa_ep_idx);
-		IPADBG("ring_base_pa = 0x%pad\n", &info->transfer_ring_base_pa);
-		IPADBG("ring_size = %hu\n", info->transfer_ring_size);
-		IPADBG("ring_db_pa = 0x%pad\n",
-			&info->transfer_ring_doorbell_pa);
-		IPADBG("evt_ring_base_pa = 0x%pad\n",
-			&info->event_ring_base_pa);
-		IPADBG("evt_ring_size = %hu\n", info->event_ring_size);
-		IPADBG("evt_ring_db_pa = 0x%pad\n",
-			&info->event_ring_doorbell_pa);
-		IPADBG("num_pkt_buffers = %hu\n", info->num_pkt_buffers);
-		IPADBG("pkt_offset = %d\n", info->pkt_offset);
-
-		wdi3_params = &cmd_data->SetupCh_params.Wdi3SetupCh_params;
-		wdi3_params->transfer_ring_base_pa =
-			(u32)info->transfer_ring_base_pa;
-		wdi3_params->transfer_ring_base_pa_hi =
-			(u32)((u64)info->transfer_ring_base_pa >> 32);
-		wdi3_params->transfer_ring_size = info->transfer_ring_size;
-		wdi3_params->transfer_ring_doorbell_pa =
-			(u32)info->transfer_ring_doorbell_pa;
-		wdi3_params->transfer_ring_doorbell_pa_hi =
-			(u32)((u64)info->transfer_ring_doorbell_pa >> 32);
-		wdi3_params->event_ring_base_pa = (u32)info->event_ring_base_pa;
-		wdi3_params->event_ring_base_pa_hi =
-			(u32)((u64)info->event_ring_base_pa >> 32);
-		wdi3_params->event_ring_size = info->event_ring_size;
-		wdi3_params->event_ring_doorbell_pa =
-			(u32)info->event_ring_doorbell_pa;
-		wdi3_params->event_ring_doorbell_pa_hi =
-			(u32)((u64)info->event_ring_doorbell_pa >> 32);
-		wdi3_params->num_pkt_buffers = info->num_pkt_buffers;
-		wdi3_params->ipa_pipe_number = ipa_ep_idx;
-		wdi3_params->dir = dir;
-		wdi3_params->pkt_offset = info->pkt_offset;
-		memcpy(wdi3_params->desc_format_template,
-			info->desc_format_template,
-			sizeof(wdi3_params->desc_format_template));
-	} else {
-		ipa_ep_idx = ipa_get_ep_mapping(info_smmu->client);
-		if (ipa_ep_idx == -1) {
-			IPAERR("fail to get ep idx.\n");
-			return -EFAULT;
-		}
-
-		IPADBG("client=%d ep=%d\n", info_smmu->client, ipa_ep_idx);
-		IPADBG("ring_size = %hu\n", info_smmu->transfer_ring_size);
-		IPADBG("ring_db_pa = 0x%pad\n",
-			&info_smmu->transfer_ring_doorbell_pa);
-		IPADBG("evt_ring_size = %hu\n", info_smmu->event_ring_size);
-		IPADBG("evt_ring_db_pa = 0x%pad\n",
-			&info_smmu->event_ring_doorbell_pa);
-		IPADBG("num_pkt_buffers = %hu\n", info_smmu->num_pkt_buffers);
-		IPADBG("pkt_offset = %d\n", info_smmu->pkt_offset);
-
-		wdi3_params = &cmd_data->SetupCh_params.Wdi3SetupCh_params;
-
-		if (dir == IPA_WDI3_TX_DIR) {
-			len = info_smmu->transfer_ring_size;
-			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_RING_RES,
-				true, info->transfer_ring_base_pa,
-				&info_smmu->transfer_ring_base, len,
-				false, &va)) {
-				IPAERR("failed to get smmu mapping\n");
-				return -EFAULT;
-			}
-			wdi3_params->transfer_ring_base_pa = (u32)va;
-			wdi3_params->transfer_ring_base_pa_hi =
-				(u32)((u64)va >> 32);
-			wdi3_params->transfer_ring_size = len;
-
-			if (ipa_create_uc_smmu_mapping(IPA_WDI_TX_DB_RES,
-				true, info_smmu->transfer_ring_doorbell_pa,
-				NULL, 4, true, &va)) {
-				IPAERR("failed to get smmu mapping\n");
-				return -EFAULT;
-			}
-			wdi3_params->transfer_ring_doorbell_pa =
-				(u32)va;
-			wdi3_params->transfer_ring_doorbell_pa_hi =
-				(u32)((u64)va >> 32);
-
-			len = info_smmu->event_ring_size;
-			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_RING_RES,
-				true, info->event_ring_base_pa,
-				&info_smmu->event_ring_base, len,
-				false, &va)) {
-				IPAERR("failed to get smmu mapping\n");
-				return -EFAULT;
-			}
-			wdi3_params->event_ring_base_pa = (u32)va;
-			wdi3_params->event_ring_base_pa_hi =
-				(u32)((u64)va >> 32);
-			wdi3_params->event_ring_size = len;
-
-			if (ipa_create_uc_smmu_mapping(IPA_WDI_CE_DB_RES,
-				true, info_smmu->event_ring_doorbell_pa,
-				NULL, 4, true, &va)) {
-				IPAERR("failed to get smmu mapping\n");
-				return -EFAULT;
-			}
-			wdi3_params->event_ring_doorbell_pa =
-				(u32)va;
-			wdi3_params->event_ring_doorbell_pa_hi =
-				(u32)((u64)va >> 32);
-		} else {
-			len = info_smmu->transfer_ring_size;
-			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RES,
-				true, info->transfer_ring_base_pa,
-				&info_smmu->transfer_ring_base, len,
-				false, &va)) {
-				IPAERR("failed to get smmu mapping\n");
-				return -EFAULT;
-			}
-			wdi3_params->transfer_ring_base_pa = (u32)va;
-			wdi3_params->transfer_ring_base_pa_hi =
-				(u32)((u64)va >> 32);
-			wdi3_params->transfer_ring_size = len;
-
-			if (ipa_create_uc_smmu_mapping(IPA_WDI_RX_RING_RP_RES,
-				true, info_smmu->transfer_ring_doorbell_pa,
-				NULL, 4, true, &va)) {
-				IPAERR("failed to get smmu mapping\n");
-				return -EFAULT;
-			}
-			wdi3_params->transfer_ring_doorbell_pa =
-				(u32)va;
-			wdi3_params->transfer_ring_doorbell_pa_hi =
-				(u32)((u64)va >> 32);
-
-			len = info_smmu->event_ring_size;
-			if (ipa_create_uc_smmu_mapping(
-				IPA_WDI_RX_COMP_RING_RES, true,
-				info->event_ring_base_pa,
-				&info_smmu->event_ring_base, len,
-				false, &va)) {
-				IPAERR("failed to get smmu mapping\n");
-				return -EFAULT;
-			}
-			wdi3_params->event_ring_base_pa = (u32)va;
-			wdi3_params->event_ring_base_pa_hi =
-				(u32)((u64)va >> 32);
-			wdi3_params->event_ring_size = len;
-
-			if (ipa_create_uc_smmu_mapping(
-				IPA_WDI_RX_COMP_RING_WP_RES, true,
-				info_smmu->event_ring_doorbell_pa,
-				NULL, 4, true, &va)) {
-				IPAERR("failed to get smmu mapping\n");
-				return -EFAULT;
-			}
-			wdi3_params->event_ring_doorbell_pa =
-				(u32)va;
-			wdi3_params->event_ring_doorbell_pa_hi =
-				(u32)((u64)va >> 32);
-		}
-		wdi3_params->num_pkt_buffers = info_smmu->num_pkt_buffers;
-		wdi3_params->ipa_pipe_number = ipa_ep_idx;
-		wdi3_params->dir = dir;
-		wdi3_params->pkt_offset = info_smmu->pkt_offset;
-		memcpy(wdi3_params->desc_format_template,
-			info_smmu->desc_format_template,
-			sizeof(wdi3_params->desc_format_template));
-	}
-
-	result = ipa3_uc_send_cmd((u32)(cmd.phys_base),
-				IPA_CPU_2_HW_CMD_OFFLOAD_CHANNEL_SET_UP,
-				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
-				false, 10*HZ);
-	if (result) {
-		IPAERR("uc setup channel cmd failed: %d\n", result);
-		result = -EFAULT;
-	}
-
-	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
-	return result;
-}
-
 int ipa3_conn_wdi3_pipes(struct ipa_wdi_conn_in_params *in,
 	struct ipa_wdi_conn_out_params *out,
 	ipa_wdi_meter_notifier_cb wdi_notify)
 {
-	enum ipa_client_type rx_client;
-	enum ipa_client_type tx_client;
-	struct ipa3_ep_context *ep_rx;
-	struct ipa3_ep_context *ep_tx;
-	int ipa_ep_idx_rx;
-	int ipa_ep_idx_tx;
-	int result = 0;
+	IPAERR("wdi3 over uc offload not supported");
+	WARN_ON(1);
 
-	if (in == NULL || out == NULL) {
-		IPAERR("invalid input\n");
-		return -EINVAL;
-	}
-
-	if (in->is_smmu_enabled == false) {
-		rx_client = in->u_rx.rx.client;
-		tx_client = in->u_tx.tx.client;
-	} else {
-		rx_client = in->u_rx.rx_smmu.client;
-		tx_client = in->u_tx.tx_smmu.client;
-	}
-
-	ipa_ep_idx_rx = ipa_get_ep_mapping(rx_client);
-	ipa_ep_idx_tx = ipa_get_ep_mapping(tx_client);
-
-	if (ipa_ep_idx_rx == -1 || ipa_ep_idx_tx == -1) {
-		IPAERR("fail to alloc EP.\n");
-		return -EFAULT;
-	}
-	if (ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES ||
-		ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES) {
-		IPAERR("ep out of range.\n");
-		return -EFAULT;
-	}
-
-	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
-	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
-
-	if (ep_rx->valid || ep_tx->valid) {
-		IPAERR("EP already allocated.\n");
-		return -EFAULT;
-	}
-
-	memset(ep_rx, 0, offsetof(struct ipa3_ep_context, sys));
-	memset(ep_tx, 0, offsetof(struct ipa3_ep_context, sys));
-
-	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-
-#ifdef IPA_WAN_MSG_IPv6_ADDR_GW_LEN
-	if (wdi_notify)
-		ipa3_ctx->uc_wdi_ctx.stats_notify = wdi_notify;
-	else
-		IPADBG("wdi_notify is null\n");
-#endif
-
-	/* setup rx ep cfg */
-	ep_rx->valid = 1;
-	ep_rx->client = rx_client;
-	result = ipa3_disable_data_path(ipa_ep_idx_rx);
-	if (result) {
-		IPAERR("disable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_rx);
-		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-		return -EFAULT;
-	}
-	ep_rx->client_notify = in->notify;
-	ep_rx->priv = in->priv;
-
-	if (in->is_smmu_enabled == false)
-		memcpy(&ep_rx->cfg, &in->u_rx.rx.ipa_ep_cfg,
-			sizeof(ep_rx->cfg));
-	else
-		memcpy(&ep_rx->cfg, &in->u_rx.rx_smmu.ipa_ep_cfg,
-			sizeof(ep_rx->cfg));
-
-	if (ipa3_cfg_ep(ipa_ep_idx_rx, &ep_rx->cfg)) {
-		IPAERR("fail to setup rx pipe cfg\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	if (ipa3_send_wdi3_setup_pipe_cmd(in->is_smmu_enabled,
-		&in->u_rx.rx, &in->u_rx.rx_smmu, IPA_WDI3_RX_DIR)) {
-		IPAERR("fail to send cmd to uc for rx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-	ipa3_install_dflt_flt_rules(ipa_ep_idx_rx);
-	out->rx_uc_db_pa = ipa3_ctx->ipa_wrapper_base +
-		ipahal_get_reg_base() +
-		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
-		IPA_HW_WDI3_RX_MBOX_START_INDEX/32,
-		IPA_HW_WDI3_RX_MBOX_START_INDEX % 32);
-
-	IPADBG("client %d (ep: %d) connected\n", rx_client,
-		ipa_ep_idx_rx);
-
-	/* setup tx ep cfg */
-	ep_tx->valid = 1;
-	ep_tx->client = tx_client;
-	result = ipa3_disable_data_path(ipa_ep_idx_tx);
-	if (result) {
-		IPAERR("disable data path failed res=%d ep=%d.\n", result,
-			ipa_ep_idx_tx);
-		result = -EFAULT;
-		goto fail;
-	}
-
-	if (in->is_smmu_enabled == false)
-		memcpy(&ep_tx->cfg, &in->u_tx.tx.ipa_ep_cfg,
-			sizeof(ep_tx->cfg));
-	else
-		memcpy(&ep_tx->cfg, &in->u_tx.tx_smmu.ipa_ep_cfg,
-			sizeof(ep_tx->cfg));
-
-	if (ipa3_cfg_ep(ipa_ep_idx_tx, &ep_tx->cfg)) {
-		IPAERR("fail to setup tx pipe cfg\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	if (ipa3_send_wdi3_setup_pipe_cmd(in->is_smmu_enabled,
-		&in->u_tx.tx, &in->u_tx.tx_smmu, IPA_WDI3_TX_DIR)) {
-		IPAERR("fail to send cmd to uc for tx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-	out->tx_uc_db_pa = ipa3_ctx->ipa_wrapper_base +
-		ipahal_get_reg_base() +
-		ipahal_get_reg_mn_ofst(IPA_UC_MAILBOX_m_n,
-		IPA_HW_WDI3_TX_MBOX_START_INDEX/32,
-		IPA_HW_WDI3_TX_MBOX_START_INDEX % 32);
-	IPADBG("client %d (ep: %d) connected\n", tx_client,
-		ipa_ep_idx_tx);
-
-fail:
-	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-	return result;
-}
-
-static int ipa3_send_wdi3_common_ch_cmd(int ipa_ep_idx, int command)
-{
-	struct ipa_mem_buffer cmd;
-	struct IpaHwOffloadCommonChCmdData_t *cmd_data;
-	union IpaHwWdi3CommonChCmdData_t *wdi3;
-	int result = 0;
-
-	cmd.size = sizeof(*cmd_data);
-	cmd.base = dma_alloc_coherent(ipa3_ctx->uc_pdev, cmd.size,
-		&cmd.phys_base, GFP_KERNEL);
-	if (cmd.base == NULL) {
-		IPAERR("fail to get DMA memory.\n");
-		return -ENOMEM;
-	}
-
-	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-	/* enable the TX pipe */
-	cmd_data = (struct IpaHwOffloadCommonChCmdData_t *)cmd.base;
-	cmd_data->protocol = IPA_HW_FEATURE_WDI3;
-
-	wdi3 = &cmd_data->CommonCh_params.Wdi3CommonCh_params;
-	wdi3->params.ipa_pipe_number = ipa_ep_idx;
-	result = ipa3_uc_send_cmd((u32)(cmd.phys_base), command,
-				IPA_HW_2_CPU_OFFLOAD_CMD_STATUS_SUCCESS,
-				false, 10*HZ);
-	if (result) {
-		result = -EFAULT;
-		goto fail;
-	}
-
-fail:
-	dma_free_coherent(ipa3_ctx->uc_pdev, cmd.size, cmd.base, cmd.phys_base);
-	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-	return result;
+	return -EFAULT;
 }
 
 int ipa3_disconn_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 {
-	struct ipa3_ep_context *ep_tx, *ep_rx;
-	int result = 0;
+	IPAERR("wdi3 over uc offload not supported");
+	WARN_ON(1);
 
-	IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
-	IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
-
-	if (ipa_ep_idx_tx < 0 || ipa_ep_idx_tx >= IPA3_MAX_NUM_PIPES ||
-		ipa_ep_idx_rx < 0 || ipa_ep_idx_rx >= IPA3_MAX_NUM_PIPES) {
-		IPAERR("invalid ipa ep index\n");
-		return -EINVAL;
-	}
-
-	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
-	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
-
-	/* tear down tx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN)) {
-		IPAERR("fail to tear down tx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-	ipa3_disable_data_path(ipa_ep_idx_tx);
-	memset(ep_tx, 0, sizeof(struct ipa3_ep_context));
-	IPADBG("tx client (ep: %d) disconnected\n", ipa_ep_idx_tx);
-
-	/* tear down rx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_TEAR_DOWN)) {
-		IPAERR("fail to tear down rx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-	ipa3_disable_data_path(ipa_ep_idx_rx);
-	ipa3_delete_dflt_flt_rules(ipa_ep_idx_rx);
-	memset(ep_rx, 0, sizeof(struct ipa3_ep_context));
-	IPADBG("rx client (ep: %d) disconnected\n", ipa_ep_idx_rx);
-
-fail:
-	return result;
+	return -EFAULT;
 }
 
 int ipa3_enable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 {
-	struct ipa3_ep_context *ep_tx, *ep_rx;
-	int result = 0;
+	IPAERR("wdi3 over uc offload not supported");
+	WARN_ON(1);
 
-	IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
-	IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
-
-	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
-	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
-
-	/* enable tx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE)) {
-		IPAERR("fail to enable tx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	/* resume tx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_RESUME)) {
-		IPAERR("fail to resume tx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	/* enable rx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_ENABLE)) {
-		IPAERR("fail to enable rx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	/* resume rx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_RESUME)) {
-		IPAERR("fail to resume rx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	IPA_ACTIVE_CLIENTS_INC_SIMPLE();
-
-	/* enable data path */
-	result = ipa3_enable_data_path(ipa_ep_idx_rx);
-	if (result) {
-		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_rx);
-		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-		return -EFAULT;
-	}
-
-	result = ipa3_enable_data_path(ipa_ep_idx_tx);
-	if (result) {
-		IPAERR("enable data path failed res=%d clnt=%d.\n", result,
-			ipa_ep_idx_tx);
-		IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-		return -EFAULT;
-	}
-
-	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
-
-fail:
-	return result;
+	return -EFAULT;
 }
 
 int ipa3_disable_wdi3_pipes(int ipa_ep_idx_tx, int ipa_ep_idx_rx)
 {
-	struct ipa3_ep_context *ep_tx, *ep_rx;
-	int result = 0;
+	IPAERR("wdi3 over uc offload not supported");
+	WARN_ON(1);
 
-	IPADBG("ep_tx = %d\n", ipa_ep_idx_tx);
-	IPADBG("ep_rx = %d\n", ipa_ep_idx_rx);
-
-	ep_tx = &ipa3_ctx->ep[ipa_ep_idx_tx];
-	ep_rx = &ipa3_ctx->ep[ipa_ep_idx_rx];
-
-	/* suspend tx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND)) {
-		IPAERR("fail to suspend tx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	/* disable tx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_tx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE)) {
-		IPAERR("fail to disable tx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	/* suspend rx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_SUSPEND)) {
-		IPAERR("fail to suspend rx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-	/* disable rx pipe */
-	if (ipa3_send_wdi3_common_ch_cmd(ipa_ep_idx_rx,
-		IPA_CPU_2_HW_CMD_OFFLOAD_DISABLE)) {
-		IPAERR("fail to disable rx pipe\n");
-		result = -EFAULT;
-		goto fail;
-	}
-
-fail:
-	return result;
+	return -EFAULT;
 }
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig
index 08f7b5b..176ddba 100644
--- a/drivers/regulator/Kconfig
+++ b/drivers/regulator/Kconfig
@@ -1057,6 +1057,15 @@
 	  This driver provides support for the voltage regulators on the
 	  WM8994 CODEC.
 
+config REGULATOR_RPMH
+	tristate "Qualcomm Technologies, Inc. Legacy RPMh regulator driver"
+	depends on QCOM_RPMH
+	help
+	  This driver supports control of PMIC regulators via the RPMh hardware
+	  block found on Qualcomm Technologies, Inc. SoCs.  RPMh regulator
+	  control allows for voting on regulator state between multiple
+	  processors within the SoC.
+
 config REGULATOR_STUB
 	tristate "Stub Regulator"
 	help
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile
index 7ab917f..0f960e9 100644
--- a/drivers/regulator/Makefile
+++ b/drivers/regulator/Makefile
@@ -134,6 +134,7 @@
 obj-$(CONFIG_REGULATOR_WM8400) += wm8400-regulator.o
 obj-$(CONFIG_REGULATOR_WM8994) += wm8994-regulator.o
 
+obj-$(CONFIG_REGULATOR_RPMH) += rpmh-regulator.o
 obj-$(CONFIG_REGULATOR_STUB) += stub-regulator.o
 
 ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG
diff --git a/drivers/regulator/rpmh-regulator.c b/drivers/regulator/rpmh-regulator.c
new file mode 100644
index 0000000..e6667c5
--- /dev/null
+++ b/drivers/regulator/rpmh-regulator.c
@@ -0,0 +1,2011 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/regulator/driver.h>
+#include <linux/regulator/machine.h>
+#include <linux/regulator/of_regulator.h>
+
+#include <soc/qcom/cmd-db.h>
+#include <soc/qcom/rpmh.h>
+
+#include <dt-bindings/regulator/qcom,rpmh-regulator-levels.h>
+
+/**
+ * enum rpmh_regulator_type - supported RPMh accelerator types
+ * %RPMH_REGULATOR_TYPE_VRM:	RPMh VRM accelerator which supports voting on
+ *				enable, voltage, mode, and headroom voltage of
+ *				LDO, SMPS, VS, and BOB type PMIC regulators.
+ * %RPMH_REGULATOR_TYPE_ARC:	RPMh ARC accelerator which supports voting on
+ *				the CPR managed voltage level of LDO and SMPS
+ *				type PMIC regulators.
+ * %RPMH_REGULATOR_TYPE_XOB:	RPMh XOB accelerator which supports voting on
+ *				the enable state of PMIC regulators.
+ */
+enum rpmh_regulator_type {
+	RPMH_REGULATOR_TYPE_VRM,
+	RPMH_REGULATOR_TYPE_ARC,
+	RPMH_REGULATOR_TYPE_XOB,
+};
+
+/**
+ * enum rpmh_regulator_hw_type - supported PMIC regulator hardware types
+ * This enum defines the specific regulator type along with its PMIC family.
+ */
+enum rpmh_regulator_hw_type {
+	RPMH_REGULATOR_HW_TYPE_UNKNOWN,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_LDO,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC4_BOB,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_LDO,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS,
+	RPMH_REGULATOR_HW_TYPE_PMIC5_BOB,
+	RPMH_REGULATOR_HW_TYPE_MAX,
+};
+
+/**
+ * enum rpmh_regulator_reg_index - RPMh accelerator register indices
+ * %RPMH_REGULATOR_REG_VRM_VOLTAGE:	VRM voltage voting register index
+ * %RPMH_REGULATOR_REG_ARC_LEVEL:	ARC voltage level voting register index
+ * %RPMH_REGULATOR_REG_VRM_ENABLE:	VRM enable voltage voting register index
+ * %RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE: Place-holder for enable aggregation.
+ *					ARC does not have a specific register
+ *					for enable voting.  Instead, ARC level
+ *					0 corresponds to "disabled" for a given
+ *					ARC regulator resource if supported.
+ * %RPMH_REGULATOR_REG_XOB_ENABLE:	XOB enable voting register index
+ * %RPMH_REGULATOR_REG_ENABLE:		Common enable index used in callback
+ *					functions for both ARC and VRM.
+ * %RPMH_REGULATOR_REG_VRM_MODE:	VRM regulator mode voting register index
+ * %RPMH_REGULATOR_REG_VRM_HEADROOM:	VRM headroom voltage voting register
+ *					index
+ * %RPMH_REGULATOR_REG_ARC_REAL_MAX:	Upper limit of real existent ARC
+ *					register indices
+ * %RPMH_REGULATOR_REG_ARC_MAX:		Exclusive upper limit of ARC register
+ *					indices
+ * %RPMH_REGULATOR_REG_XOB_MAX:		Exclusive upper limit of XOB register
+ *					indices
+ * %RPMH_REGULATOR_REG_VRM_MAX:		Exclusive upper limit of VRM register
+ *					indices
+ * %RPMH_REGULATOR_REG_MAX:		Combined exclusive upper limit of ARC
+ *					and VRM register indices
+ *
+ * Register addresses are calculated as: base_addr + sizeof(u32) * reg_index
+ */
+enum rpmh_regulator_reg_index {
+	RPMH_REGULATOR_REG_VRM_VOLTAGE		= 0,
+	RPMH_REGULATOR_REG_ARC_LEVEL		= 0,
+	RPMH_REGULATOR_REG_VRM_ENABLE		= 1,
+	RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE	= RPMH_REGULATOR_REG_VRM_ENABLE,
+	RPMH_REGULATOR_REG_XOB_ENABLE		= RPMH_REGULATOR_REG_VRM_ENABLE,
+	RPMH_REGULATOR_REG_ENABLE		= RPMH_REGULATOR_REG_VRM_ENABLE,
+	RPMH_REGULATOR_REG_VRM_MODE		= 2,
+	RPMH_REGULATOR_REG_VRM_HEADROOM		= 3,
+	RPMH_REGULATOR_REG_ARC_REAL_MAX		= 1,
+	RPMH_REGULATOR_REG_ARC_MAX		= 2,
+	RPMH_REGULATOR_REG_XOB_MAX		= 2,
+	RPMH_REGULATOR_REG_VRM_MAX		= 4,
+	RPMH_REGULATOR_REG_MAX			= 4,
+};
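+
+/*
+ * Illustrative example (not part of the original patch): applying the formula
+ * above, the VRM mode voting register of a resource lives at
+ *
+ *	aggr_vreg->addr + RPMH_REGULATOR_REG_VRM_MODE * sizeof(u32)
+ *
+ * which matches the "aggr_vreg->addr + i * 4" address computation used when
+ * tcs_cmd entries are built in rpmh_regulator_send_aggregate_requests().
+ */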
+
+/*
+ * This is the number of bytes used for each command DB aux data entry of an
+ * ARC resource.
+ */
+#define RPMH_ARC_LEVEL_SIZE		2
+
+/*
+ * This is the maximum number of voltage levels that may be defined for an ARC
+ * resource.
+ */
+#define RPMH_ARC_MAX_LEVELS		16
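+
+/*
+ * Illustrative example (not part of the original patch): an ARC resource that
+ * advertises 5 voltage levels carries 5 * RPMH_ARC_LEVEL_SIZE = 10 bytes of
+ * aux data in command DB, which would be parsed into level[0..4] with
+ * level_count = 5.
+ */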
+
+#define RPMH_REGULATOR_LEVEL_OFF	0
+
+/* Min and max limits of VRM resource request parameters */
+#define RPMH_VRM_MIN_UV			0
+#define RPMH_VRM_MAX_UV			8191000
+
+#define RPMH_VRM_HEADROOM_MIN_UV	0
+#define RPMH_VRM_HEADROOM_MAX_UV	511000
+
+#define RPMH_VRM_MODE_MIN		0
+#define RPMH_VRM_MODE_MAX		7
+
+/* XOB voting registers are found in the VRM hardware module */
+#define CMD_DB_HW_XOB			CMD_DB_HW_VRM
+
+/**
+ * struct rpmh_regulator_request - rpmh request data
+ * @reg:			Array of RPMh accelerator register values
+ * @valid:			Bitmask identifying which of the register values
+ *				are valid/initialized
+ */
+struct rpmh_regulator_request {
+	u32				reg[RPMH_REGULATOR_REG_MAX];
+	u32				valid;
+};
+
+/**
+ * struct rpmh_regulator_mode - RPMh VRM mode attributes
+ * @pmic_mode:			Raw PMIC mode value written into VRM mode voting
+ *				register (i.e. RPMH_REGULATOR_MODE_*)
+ * @framework_mode:		Regulator framework mode value
+ *				(i.e. REGULATOR_MODE_*)
+ * @min_load_ua:		The minimum load current in microamps which
+ *				would utilize this mode
+ *
+ * Software selects the lowest mode for which aggr_load_ua >= min_load_ua.
+ */
+struct rpmh_regulator_mode {
+	u32				pmic_mode;
+	u32				framework_mode;
+	int				min_load_ua;
+};
+
+struct rpmh_vreg;
+
+/**
+ * struct rpmh_aggr_vreg - top level aggregated rpmh regulator resource data
+ *		structure
+ * @dev:			Device pointer to the rpmh aggregated regulator
+ *				device
+ * @resource_name:		Name of rpmh regulator resource which is mapped
+ *				to an RPMh accelerator address via command DB.
+ *				This name must match to one that is defined by
+ *				the bootloader.
+ * @addr:			Base address of the regulator resource within
+ *				an RPMh accelerator
+ * @lock:			Mutex lock used for locking between regulators
+ *				common to a single aggregated resource
+ * @regulator_type:		RPMh accelerator type for this regulator
+ *				resource
+ * @regulator_hw_type:		The regulator hardware type (e.g. LDO or SMPS)
+ *				along with PMIC family (i.e. PMIC4 or PMIC5)
+ * @level:			Mapping from ARC resource specific voltage
+ *				levels (0 to RPMH_ARC_MAX_LEVELS - 1) to common
+ *				consumer voltage levels (i.e.
+ *				RPMH_REGULATOR_LEVEL_*).  These values are read
+ *				out of the AUX data found in command DB for a
+ *				given ARC resource.
+ * @level_count:		The number of valid entries in the level array
+ * @always_wait_for_ack:	Boolean flag indicating if a request must always
+ *				wait for an ACK from RPMh before continuing even
+ *				if it corresponds to a strictly lower power
+ *				state (e.g. enabled --> disabled).
+ * @next_wait_for_ack:		Boolean flag indicating that the next request
+ *				sent must wait for an ACK.  This is used to
+ *				ensure that the driver waits for the voltage to
+ *				slew down in the case that the requested max_uV
+ *				value is lower than the last requested voltage.
+ * @sleep_request_sent:		Boolean flag indicating that a sleep set request
+ *				has been sent at some point due to it diverging
+ *				from the active set request.  After that point,
+ *				the sleep set requests must always be sent for
+ *				a given resource.
+ * @vreg:			Array of rpmh regulator structs representing the
+ *				individual regulators sharing the aggregated
+ *				regulator resource.
+ * @vreg_count:			The number of entries in the vreg array.
+ * @mode:			An array of modes supported by an RPMh VRM
+ *				regulator resource.
+ * @mode_count:			The number of entries in the mode array.
+ * @aggr_req_active:		Aggregated active set RPMh accelerator register
+ *				request
+ * @aggr_req_sleep:		Aggregated sleep set RPMh accelerator register
+ *				request
+ */
+struct rpmh_aggr_vreg {
+	struct device			*dev;
+	const char			*resource_name;
+	u32				addr;
+	struct mutex			lock;
+	enum rpmh_regulator_type	regulator_type;
+	enum rpmh_regulator_hw_type	regulator_hw_type;
+	u32				level[RPMH_ARC_MAX_LEVELS];
+	int				level_count;
+	bool				always_wait_for_ack;
+	bool				next_wait_for_ack;
+	bool				sleep_request_sent;
+	struct rpmh_vreg		*vreg;
+	int				vreg_count;
+	struct rpmh_regulator_mode	*mode;
+	int				mode_count;
+	struct rpmh_regulator_request	aggr_req_active;
+	struct rpmh_regulator_request	aggr_req_sleep;
+};
+
+/**
+ * struct rpmh_vreg - individual rpmh regulator data structure encapsulating a
+ *		regulator framework regulator device and its corresponding
+ *		rpmh request
+ * @of_node:			Device node pointer for the individual rpmh
+ *				regulator
+ * @rdesc:			Regulator descriptor
+ * @rdev:			Regulator device pointer returned by
+ *				devm_regulator_register()
+ * @aggr_vreg:			Pointer to the aggregated rpmh regulator
+ *				resource
+ * @set_active:			Boolean flag indicating that requests made by
+ *				this regulator should take effect in the active
+ *				set
+ * @set_sleep:			Boolean flag indicating that requests made by
+ *				this regulator should take effect in the sleep
+ *				set
+ * @req:			RPMh accelerator register request
+ * @mode_index:			RPMh VRM regulator mode selected by index into
+ *				aggr_vreg->mode
+ */
+struct rpmh_vreg {
+	struct device_node		*of_node;
+	struct regulator_desc		rdesc;
+	struct regulator_dev		*rdev;
+	struct rpmh_aggr_vreg		*aggr_vreg;
+	bool				set_active;
+	bool				set_sleep;
+	struct rpmh_regulator_request	req;
+	int				mode_index;
+};
+
+#define RPMH_REGULATOR_MODE_COUNT		5
+
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_RM	4
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_LPM	5
+#define RPMH_REGULATOR_MODE_PMIC4_LDO_HPM	7
+
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_RM	4
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_PFM	5
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC4_SMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PASS	0
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PFM	1
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_AUTO	2
+#define RPMH_REGULATOR_MODE_PMIC4_BOB_PWM	3
+
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_LPM	4
+#define RPMH_REGULATOR_MODE_PMIC5_LDO_HPM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PFM	4
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_FTSMPS_RM	3
+#define RPMH_REGULATOR_MODE_PMIC5_FTSMPS_PWM	7
+
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PASS	2
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PFM	4
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_AUTO	6
+#define RPMH_REGULATOR_MODE_PMIC5_BOB_PWM	7
+
+/*
+ * Mappings from RPMh generic modes to VRM accelerator modes and regulator
+ * framework modes for each regulator type.
+ */
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_ldo[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_LPM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_LDO_HPM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_smps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_SMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic4_bob[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_PASS] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PASS,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC4_BOB_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_ldo[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_LPM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_LDO_HPM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_hfsmps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_HFSMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_ftsmps[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_RET] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_FTSMPS_RM,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_FTSMPS_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode
+rpmh_regulator_mode_map_pmic5_bob[RPMH_REGULATOR_MODE_COUNT] = {
+	[RPMH_REGULATOR_MODE_PASS] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PASS,
+		.framework_mode = REGULATOR_MODE_STANDBY,
+	},
+	[RPMH_REGULATOR_MODE_LPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PFM,
+		.framework_mode = REGULATOR_MODE_IDLE,
+	},
+	[RPMH_REGULATOR_MODE_AUTO] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_AUTO,
+		.framework_mode = REGULATOR_MODE_NORMAL,
+	},
+	[RPMH_REGULATOR_MODE_HPM] = {
+		.pmic_mode = RPMH_REGULATOR_MODE_PMIC5_BOB_PWM,
+		.framework_mode = REGULATOR_MODE_FAST,
+	},
+};
+
+static const struct rpmh_regulator_mode * const
+rpmh_regulator_mode_map[RPMH_REGULATOR_HW_TYPE_MAX] = {
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_LDO]
+		= rpmh_regulator_mode_map_pmic4_ldo,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS]
+		= rpmh_regulator_mode_map_pmic4_smps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS]
+		= rpmh_regulator_mode_map_pmic4_smps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC4_BOB]
+		= rpmh_regulator_mode_map_pmic4_bob,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_LDO]
+		= rpmh_regulator_mode_map_pmic5_ldo,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS]
+		= rpmh_regulator_mode_map_pmic5_hfsmps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS]
+		= rpmh_regulator_mode_map_pmic5_ftsmps,
+	[RPMH_REGULATOR_HW_TYPE_PMIC5_BOB]
+		= rpmh_regulator_mode_map_pmic5_bob,
+};
+
+/*
+ * This voltage in uV is returned by get_voltage functions when there is no way
+ * to determine the current voltage level.  It is needed because the regulator
+ * framework treats a 0 uV voltage as an error.
+ */
+#define VOLTAGE_UNKNOWN 1
+
+#define vreg_err(vreg, message, ...) \
+	pr_err("%s: " message, (vreg)->rdesc.name, ##__VA_ARGS__)
+#define vreg_info(vreg, message, ...) \
+	pr_info("%s: " message, (vreg)->rdesc.name, ##__VA_ARGS__)
+#define vreg_debug(vreg, message, ...) \
+	pr_debug("%s: " message, (vreg)->rdesc.name, ##__VA_ARGS__)
+
+#define aggr_vreg_err(aggr_vreg, message, ...) \
+	pr_err("%s: " message, (aggr_vreg)->resource_name, ##__VA_ARGS__)
+#define aggr_vreg_info(aggr_vreg, message, ...) \
+	pr_info("%s: " message, (aggr_vreg)->resource_name, ##__VA_ARGS__)
+#define aggr_vreg_debug(aggr_vreg, message, ...) \
+	pr_debug("%s: " message, (aggr_vreg)->resource_name, ##__VA_ARGS__)
+
+#define DEBUG_PRINT_BUFFER_SIZE 256
+static const char *const rpmh_regulator_state_names[] = {
+	[RPMH_SLEEP_STATE]		= "sleep ",
+	[RPMH_WAKE_ONLY_STATE]		= "wake  ",
+	[RPMH_ACTIVE_ONLY_STATE]	= "active",
+};
+
+static const char *const rpmh_regulator_vrm_param_names[] = {
+	[RPMH_REGULATOR_REG_VRM_VOLTAGE]	= "mv",
+	[RPMH_REGULATOR_REG_VRM_ENABLE]		= "en",
+	[RPMH_REGULATOR_REG_VRM_MODE]		= "mode",
+	[RPMH_REGULATOR_REG_VRM_HEADROOM]	= "hr_mv",
+};
+
+static const char *const rpmh_regulator_arc_param_names[] = {
+	[RPMH_REGULATOR_REG_ARC_LEVEL]		= "hlvl",
+};
+
+static const char *const rpmh_regulator_xob_param_names[] = {
+	[RPMH_REGULATOR_REG_XOB_ENABLE]		= "en",
+};
+
+static const int max_reg_index_map[] = {
+	[RPMH_REGULATOR_TYPE_VRM] = RPMH_REGULATOR_REG_VRM_MAX,
+	[RPMH_REGULATOR_TYPE_ARC] = RPMH_REGULATOR_REG_ARC_MAX,
+	[RPMH_REGULATOR_TYPE_XOB] = RPMH_REGULATOR_REG_XOB_MAX,
+};
+
+/**
+ * rpmh_regulator_get_max_reg_index() - return the number of registers
+ *		associated with the rpmh resource
+ * @aggr_vreg:		Pointer to the aggregated rpmh regulator resource
+ *
+ * Return: max number of registers for the aggr_vreg rpmh resource
+ */
+static int rpmh_regulator_get_max_reg_index(struct rpmh_aggr_vreg *aggr_vreg)
+{
+	if (aggr_vreg->regulator_type >= ARRAY_SIZE(max_reg_index_map))
+		return -EINVAL;
+	else
+		return max_reg_index_map[aggr_vreg->regulator_type];
+}
+
+/**
+ * rpmh_regulator_req() - print the rpmh regulator request to the kernel log
+ * @vreg:		Pointer to the RPMh regulator
+ * @current_req:	Pointer to the new request
+ * @prev_req:		Pointer to the last request
+ * @sent_mask:		Bitmask which specifies the parameters sent in this
+ *			request
+ * @state:		The rpmh state that the request was sent for
+ *
+ * Return: none
+ */
+static void rpmh_regulator_req(struct rpmh_vreg *vreg,
+		struct rpmh_regulator_request *current_req,
+		struct rpmh_regulator_request *prev_req,
+		u32 sent_mask,
+		enum rpmh_state state)
+{
+	struct rpmh_aggr_vreg *aggr_vreg = vreg->aggr_vreg;
+	char buf[DEBUG_PRINT_BUFFER_SIZE];
+	size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
+	const char *const *param_name;
+	int i, max_reg_index;
+	int pos = 0;
+	u32 valid;
+	bool first;
+
+	switch (aggr_vreg->regulator_type) {
+	case RPMH_REGULATOR_TYPE_VRM:
+		max_reg_index = RPMH_REGULATOR_REG_VRM_MAX;
+		param_name = rpmh_regulator_vrm_param_names;
+		break;
+	case RPMH_REGULATOR_TYPE_ARC:
+		max_reg_index = RPMH_REGULATOR_REG_ARC_REAL_MAX;
+		param_name = rpmh_regulator_arc_param_names;
+		break;
+	case RPMH_REGULATOR_TYPE_XOB:
+		max_reg_index = RPMH_REGULATOR_REG_XOB_MAX;
+		param_name = rpmh_regulator_xob_param_names;
+		break;
+	default:
+		return;
+	}
+
+	pos += scnprintf(buf + pos, buflen - pos,
+			"%s (%s), addr=0x%05X: s=%s; sent: ",
+			aggr_vreg->resource_name, vreg->rdesc.name,
+			aggr_vreg->addr, rpmh_regulator_state_names[state]);
+
+	valid = sent_mask;
+	first = true;
+	for (i = 0; i < max_reg_index; i++) {
+		if (valid & BIT(i)) {
+			pos += scnprintf(buf + pos, buflen - pos, "%s%s=%u",
+					(first ? "" : ", "), param_name[i],
+					current_req->reg[i]);
+			first = false;
+			if (aggr_vreg->regulator_type
+				== RPMH_REGULATOR_TYPE_ARC
+			    && i == RPMH_REGULATOR_REG_ARC_LEVEL)
+				pos += scnprintf(buf + pos, buflen - pos,
+					" (vlvl=%u)",
+					aggr_vreg->level[current_req->reg[i]]);
+		}
+	}
+
+	valid = prev_req->valid & ~sent_mask;
+
+	if (valid)
+		pos += scnprintf(buf + pos, buflen - pos, "; prev: ");
+	first = true;
+	for (i = 0; i < max_reg_index; i++) {
+		if (valid & BIT(i)) {
+			pos += scnprintf(buf + pos, buflen - pos, "%s%s=%u",
+					(first ? "" : ", "), param_name[i],
+					prev_req->reg[i]);
+			first = false;
+			if (aggr_vreg->regulator_type
+				== RPMH_REGULATOR_TYPE_ARC
+			    && i == RPMH_REGULATOR_REG_ARC_LEVEL)
+				pos += scnprintf(buf + pos, buflen - pos,
+					" (vlvl=%u)",
+					aggr_vreg->level[prev_req->reg[i]]);
+		}
+	}
+
+	pr_debug("%s\n", buf);
+}
+
+/**
+ * rpmh_regulator_handle_arc_enable() - handle masking of the voltage level
+ *		request based on the pseudo-enable value
+ * @aggr_vreg:		Pointer to the aggregated rpmh regulator resource
+ * @req:		Pointer to the newly aggregated request
+ *
+ * Return: none
+ */
+static void rpmh_regulator_handle_arc_enable(struct rpmh_aggr_vreg *aggr_vreg,
+					     struct rpmh_regulator_request *req)
+{
+	if (aggr_vreg->regulator_type != RPMH_REGULATOR_TYPE_ARC)
+		return;
+
+	/*
+	 * Mask the voltage level if "off" level is supported and the regulator
+	 * has not been enabled.
+	 */
+	if (aggr_vreg->level[0] == RPMH_REGULATOR_LEVEL_OFF) {
+		if (req->valid & BIT(RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE)) {
+			if (!req->reg[RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE])
+				req->reg[RPMH_REGULATOR_REG_ARC_LEVEL] = 0;
+		} else {
+			/* Invalidate voltage level if enable is invalid. */
+			req->valid &= ~BIT(RPMH_REGULATOR_REG_ARC_LEVEL);
+		}
+	}
+
+	/*
+	 * Mark the pseudo enable bit as invalid so that it is not accidentally
+	 * included in an RPMh command.
+	 */
+	req->valid &= ~BIT(RPMH_REGULATOR_REG_ARC_PSEUDO_ENABLE);
+}
+
+/**
+ * rpmh_regulator_aggregate_requests() - aggregate the requests from all
+ *		regulators associated with an RPMh resource
+ * @aggr_vreg:		Pointer to the aggregated rpmh regulator resource
+ * @req_active:		Pointer to active set request output
+ * @req_sleep:		Pointer to sleep set request output
+ *
+ * This function aggregates the requests from the different regulators
+ * associated with the aggr_vreg resource independently in both the active set
+ * and sleep set.  The aggregated results are stored in req_active and
+ * req_sleep.
+ *
+ * Return: none
+ */
+static void rpmh_regulator_aggregate_requests(struct rpmh_aggr_vreg *aggr_vreg,
+				struct rpmh_regulator_request *req_active,
+				struct rpmh_regulator_request *req_sleep)
+{
+	int i, j, max_reg_index;
+
+	max_reg_index = rpmh_regulator_get_max_reg_index(aggr_vreg);
+	/*
+	 * Perform max aggregation of each register value across all regulators
+	 * which use this RPMh resource.
+	 */
+	for (i = 0; i < aggr_vreg->vreg_count; i++) {
+		if (aggr_vreg->vreg[i].set_active) {
+			for (j = 0; j < max_reg_index; j++)
+				req_active->reg[j] = max(req_active->reg[j],
+						aggr_vreg->vreg[i].req.reg[j]);
+			req_active->valid |= aggr_vreg->vreg[i].req.valid;
+		}
+		if (aggr_vreg->vreg[i].set_sleep) {
+			for (j = 0; j < max_reg_index; j++)
+				req_sleep->reg[j] = max(req_sleep->reg[j],
+						aggr_vreg->vreg[i].req.reg[j]);
+			req_sleep->valid |= aggr_vreg->vreg[i].req.valid;
+		}
+	}
+
+	rpmh_regulator_handle_arc_enable(aggr_vreg, req_active);
+	rpmh_regulator_handle_arc_enable(aggr_vreg, req_sleep);
+}
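+
+/*
+ * Illustrative example (not part of the original patch): if two regulators
+ * sharing one VRM resource vote 1800 and 2950 in the "mv" register and both
+ * have set_active set, the max aggregation above yields an active set request
+ * of 2950 for that register.
+ */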
+
+/**
+ * rpmh_regulator_send_aggregate_requests() - aggregate the requests from all
+ *		regulators associated with an RPMh resource and send the request
+ *		to RPMh
+ * @vreg:		Pointer to the RPMh regulator
+ *
+ * This function aggregates the requests from the different regulators
+ * associated with the aggr_vreg resource independently in both the active set
+ * and sleep set.  The requests are only sent for the sleep set if they differ,
+ * or have differed in the past, from those of the active set.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int
+rpmh_regulator_send_aggregate_requests(struct rpmh_vreg *vreg)
+{
+	struct rpmh_aggr_vreg *aggr_vreg = vreg->aggr_vreg;
+	struct rpmh_regulator_request req_active = { {0} };
+	struct rpmh_regulator_request req_sleep = { {0} };
+	struct tcs_cmd cmd[RPMH_REGULATOR_REG_MAX] = { {0} };
+	bool sleep_set_differs = aggr_vreg->sleep_request_sent;
+	bool wait_for_ack = aggr_vreg->always_wait_for_ack
+				|| aggr_vreg->next_wait_for_ack;
+	bool resend_active = false;
+	int i, j, max_reg_index, rc;
+	enum rpmh_state state;
+	u32 sent_mask;
+
+	max_reg_index = rpmh_regulator_get_max_reg_index(aggr_vreg);
+
+	rpmh_regulator_aggregate_requests(aggr_vreg, &req_active, &req_sleep);
+
+	/*
+	 * Check if the aggregated sleep set parameter values differ from the
+	 * aggregated active set parameter values.
+	 */
+	if (!aggr_vreg->sleep_request_sent) {
+		for (i = 0; i < max_reg_index; i++) {
+			if ((req_active.reg[i] != req_sleep.reg[i])
+			    && (req_sleep.valid & BIT(i))) {
+				sleep_set_differs = true;
+				/*
+				 * Resend full active set request so that
+				 * all parameters are specified in the wake-only
+				 * state request.
+				 */
+				resend_active = true;
+				break;
+			}
+		}
+	}
+
+	if (sleep_set_differs) {
+		/*
+		 * Generate an rpmh command consisting of only those registers
+		 * which have new values or which have never been touched before
+		 * (i.e. those that were previously not valid).
+		 */
+		sent_mask = 0;
+		for (i = 0, j = 0; i < max_reg_index; i++) {
+			if ((req_sleep.valid & BIT(i))
+			    && (!(aggr_vreg->aggr_req_sleep.valid & BIT(i))
+				|| aggr_vreg->aggr_req_sleep.reg[i]
+					!= req_sleep.reg[i])) {
+				cmd[j].addr = aggr_vreg->addr + i * 4;
+				cmd[j].data = req_sleep.reg[i];
+				j++;
+				sent_mask |= BIT(i);
+			}
+		}
+
+		/* Send the rpmh command if any register values differ. */
+		if (j > 0) {
+			rc = rpmh_write_async(aggr_vreg->dev,
+					RPMH_SLEEP_STATE, cmd, j);
+			if (rc) {
+				aggr_vreg_err(aggr_vreg, "sleep state rpmh_write_async() failed, rc=%d\n",
+					rc);
+				return rc;
+			}
+			rpmh_regulator_req(vreg, &req_sleep,
+				&aggr_vreg->aggr_req_sleep,
+				sent_mask,
+				RPMH_SLEEP_STATE);
+			aggr_vreg->sleep_request_sent = true;
+			aggr_vreg->aggr_req_sleep = req_sleep;
+		}
+	}
+
+	/*
+	 * Generate an rpmh command consisting of only those registers
+	 * which have new values or which have never been touched before
+	 * (i.e. those that were previously not valid).
+	 */
+	sent_mask = 0;
+	for (i = 0, j = 0; i < max_reg_index; i++) {
+		if ((req_active.valid & BIT(i))
+		    && (!(aggr_vreg->aggr_req_active.valid & BIT(i))
+			|| aggr_vreg->aggr_req_active.reg[i]
+				!= req_active.reg[i] || resend_active)) {
+			cmd[j].addr = aggr_vreg->addr + i * 4;
+			cmd[j].data = req_active.reg[i];
+			cmd[j].wait = sleep_set_differs;
+			j++;
+			sent_mask |= BIT(i);
+
+			/*
+			 * Must wait for ACK from RPMh if power state is
+			 * increasing
+			 */
+			if (req_active.reg[i]
+			    > aggr_vreg->aggr_req_active.reg[i])
+				wait_for_ack = true;
+		}
+	}
+
+	/* Send the rpmh command if any register values differ. */
+	if (j > 0) {
+		if (sleep_set_differs) {
+			state = RPMH_WAKE_ONLY_STATE;
+			rc = rpmh_write_async(aggr_vreg->dev, state, cmd, j);
+			if (rc) {
+				aggr_vreg_err(aggr_vreg, "%s state rpmh_write_async() failed, rc=%d\n",
+					rpmh_regulator_state_names[state], rc);
+				return rc;
+			}
+			rpmh_regulator_req(vreg, &req_active,
+				&aggr_vreg->aggr_req_active, sent_mask, state);
+			for (i = 0; i < j; i++)
+				cmd[i].wait = false;
+		}
+
+		state = RPMH_ACTIVE_ONLY_STATE;
+		if (wait_for_ack)
+			rc = rpmh_write(aggr_vreg->dev, state, cmd, j);
+		else
+			rc = rpmh_write_async(aggr_vreg->dev, state,
+						cmd, j);
+		if (rc) {
+			aggr_vreg_err(aggr_vreg, "%s state rpmh_write() failed, rc=%d\n",
+				rpmh_regulator_state_names[state], rc);
+			return rc;
+		}
+		rpmh_regulator_req(vreg, &req_active,
+				&aggr_vreg->aggr_req_active, sent_mask, state);
+
+		aggr_vreg->aggr_req_active = req_active;
+		aggr_vreg->next_wait_for_ack = false;
+	}
+
+	return 0;
+}
+
+/**
+ * rpmh_regulator_set_reg() - set a register value within the request for an
+ *		RPMh regulator and return the previous value
+ * @vreg:		Pointer to the RPMh regulator
+ * @reg_index:		Index of the register value to update
+ * @value:		New register value to set
+ *
+ * Return: old register value
+ */
+static u32 rpmh_regulator_set_reg(struct rpmh_vreg *vreg, int reg_index,
+				u32 value)
+{
+	u32 old_value;
+
+	old_value = vreg->req.reg[reg_index];
+	vreg->req.reg[reg_index] = value;
+	vreg->req.valid |= BIT(reg_index);
+
+	return old_value;
+}
+
+/**
+ * rpmh_regulator_check_param_max() - sets if the next request must wait for
+ *		an ACK based on the previously sent reg[index] value and the new
+ *		max value
+ * @aggr_vreg:		Pointer to the aggregated rpmh regulator resource
+ * @index:		Register index
+ * @new_max:		Newly requested maximum allowed value for the parameter
+ *
+ * This function is used to handle the case when a consumer makes a new
+ * (min_uv, max_uv) range request in which the new max_uv is lower than the
+ * previously requested min_uv.  In this case, the driver must wait for an ACK
+ * from RPMh to ensure that the voltage has completed reducing to the new min_uv
+ * value since the consumer cannot operate at the old min_uv value.
+ *
+ * Return: none
+ */
+static void rpmh_regulator_check_param_max(struct rpmh_aggr_vreg *aggr_vreg,
+					int index, u32 new_max)
+{
+	if ((aggr_vreg->aggr_req_active.valid & BIT(index))
+	    && aggr_vreg->aggr_req_active.reg[index] > new_max)
+		aggr_vreg->next_wait_for_ack = true;
+}
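+
+/*
+ * Illustrative scenario (not part of the original patch): if the value last
+ * sent for a parameter exceeds a newly requested maximum (for example a
+ * consumer lowers max_uv below the previously voted voltage),
+ * next_wait_for_ack is set so that the following request takes the blocking
+ * rpmh_write() path and the rail is known to have finished slewing down.
+ */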
+
+/**
+ * rpmh_regulator_is_enabled() - return the enable state of the RPMh
+ *		regulator
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each rpmh-regulator device.
+ *
+ * Note that for ARC resources, this value is effectively a flag indicating if
+ * the requested voltage level is masked or unmasked since "disabled" = voltage
+ * level 0 (if supported).
+ *
+ * Return: true if regulator is enabled, false if regulator is disabled
+ */
+static int rpmh_regulator_is_enabled(struct regulator_dev *rdev)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return !!vreg->req.reg[RPMH_REGULATOR_REG_ENABLE];
+}
+
+/**
+ * rpmh_regulator_enable() - enable the RPMh regulator
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each rpmh-regulator device.
+ *
+ * Note that for ARC devices the enable state is handled via the voltage level
+ * parameter.  Therefore, this enable value effectively masks or unmasks the
+ * enabled voltage level.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_enable(struct regulator_dev *rdev)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+	u32 prev_enable;
+	int rc;
+
+	mutex_lock(&vreg->aggr_vreg->lock);
+
+	prev_enable
+	       = rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_ENABLE, 1);
+
+	rc = rpmh_regulator_send_aggregate_requests(vreg);
+	if (rc) {
+		vreg_err(vreg, "enable failed, rc=%d\n", rc);
+		rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_ENABLE,
+					prev_enable);
+	}
+
+	mutex_unlock(&vreg->aggr_vreg->lock);
+
+	return rc;
+}
+
+/**
+ * rpmh_regulator_disable() - disable the RPMh regulator
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each rpmh-regulator device.
+ *
+ * Note that for ARC devices the enable state is handled via the voltage level
+ * parameter.  Therefore, this enable value effectively masks or unmasks the
+ * enabled voltage level.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_disable(struct regulator_dev *rdev)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+	u32 prev_enable;
+	int rc;
+
+	mutex_lock(&vreg->aggr_vreg->lock);
+
+	prev_enable
+	       = rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_ENABLE, 0);
+
+	rc = rpmh_regulator_send_aggregate_requests(vreg);
+	if (rc) {
+		vreg_err(vreg, "disable failed, rc=%d\n", rc);
+		rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_ENABLE,
+					prev_enable);
+	}
+
+	mutex_unlock(&vreg->aggr_vreg->lock);
+
+	return rc;
+}
+
+/**
+ * rpmh_regulator_vrm_set_voltage() - set the voltage of the VRM rpmh-regulator
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ * @min_uv:		New voltage in microvolts to set
+ * @max_uv:		Maximum voltage in microvolts allowed
+ * @selector:		Unused
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each VRM rpmh-regulator device.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_vrm_set_voltage(struct regulator_dev *rdev,
+				int min_uv, int max_uv, unsigned int *selector)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+	u32 prev_voltage;
+	int mv;
+	int rc = 0;
+
+	mv = DIV_ROUND_UP(min_uv, 1000);
+	if (mv * 1000 > max_uv) {
+		vreg_err(vreg, "no set points available in range %d-%d uV\n",
+			min_uv, max_uv);
+		return -EINVAL;
+	}
+
+	mutex_lock(&vreg->aggr_vreg->lock);
+
+	prev_voltage
+	     = rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_VRM_VOLTAGE, mv);
+	rpmh_regulator_check_param_max(vreg->aggr_vreg,
+				RPMH_REGULATOR_REG_VRM_VOLTAGE, max_uv / 1000);
+
+	rc = rpmh_regulator_send_aggregate_requests(vreg);
+	if (rc) {
+		vreg_err(vreg, "set voltage=%d mV failed, rc=%d\n", mv, rc);
+		rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_VRM_VOLTAGE,
+					prev_voltage);
+	}
+
+	mutex_unlock(&vreg->aggr_vreg->lock);
+
+	return rc;
+}
+
+/**
+ * rpmh_regulator_vrm_get_voltage() - get the voltage of the VRM rpmh-regulator
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each VRM rpmh-regulator device.
+ *
+ * Return: regulator voltage in microvolts
+ */
+static int rpmh_regulator_vrm_get_voltage(struct regulator_dev *rdev)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+	int uv;
+
+	uv = vreg->req.reg[RPMH_REGULATOR_REG_VRM_VOLTAGE] * 1000;
+	if (uv == 0)
+		uv = VOLTAGE_UNKNOWN;
+
+	return uv;
+}
+
+/**
+ * rpmh_regulator_vrm_set_mode_index() - set the mode of a VRM regulator to the
+ *		mode mapped to mode_index
+ * @vreg:		Pointer to the RPMh regulator
+ * @mode_index:		Index into aggr_vreg->mode[] array
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_vrm_set_mode_index(struct rpmh_vreg *vreg,
+					     int mode_index)
+{
+	u32 prev_mode;
+	int rc;
+
+	mutex_lock(&vreg->aggr_vreg->lock);
+
+	prev_mode = rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_VRM_MODE,
+				vreg->aggr_vreg->mode[mode_index].pmic_mode);
+
+	rc = rpmh_regulator_send_aggregate_requests(vreg);
+	if (rc) {
+		vreg_err(vreg, "set mode=%u failed, rc=%d\n",
+			vreg->req.reg[RPMH_REGULATOR_REG_VRM_MODE],
+			rc);
+		rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_VRM_MODE,
+					prev_mode);
+	} else {
+		vreg->mode_index = mode_index;
+	}
+
+	mutex_unlock(&vreg->aggr_vreg->lock);
+
+	return rc;
+}
+
+/**
+ * rpmh_regulator_vrm_set_mode() - set the mode of the VRM rpmh-regulator
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ * @mode:		The regulator framework mode to set
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each VRM rpmh-regulator device.
+ *
+ * This function sets the PMIC mode corresponding to the specified framework
+ * mode.  The set of PMIC modes allowed is defined in device tree for a given
+ * RPMh regulator resource.  The full mapping from generic modes to PMIC modes
+ * and framework modes is defined in the rpmh_regulator_mode_map[] array.  The
+ * RPMh resource specific mapping is defined in the aggr_vreg->mode[] array.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_vrm_set_mode(struct regulator_dev *rdev,
+					unsigned int mode)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+	int i;
+
+	for (i = 0; i < vreg->aggr_vreg->mode_count; i++)
+		if (vreg->aggr_vreg->mode[i].framework_mode == mode)
+			break;
+	if (i >= vreg->aggr_vreg->mode_count) {
+		vreg_err(vreg, "invalid mode=%u\n", mode);
+		return -EINVAL;
+	}
+
+	return rpmh_regulator_vrm_set_mode_index(vreg, i);
+}
+
+/**
+ * rpmh_regulator_vrm_get_mode() - get the mode of the VRM rpmh-regulator
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each VRM rpmh-regulator device.
+ *
+ * Return: the regulator framework mode of the regulator
+ */
+static unsigned int rpmh_regulator_vrm_get_mode(struct regulator_dev *rdev)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->aggr_vreg->mode[vreg->mode_index].framework_mode;
+}
+
+/**
+ * rpmh_regulator_vrm_set_load() - set the PMIC mode based upon the maximum load
+ *		required from the VRM rpmh-regulator
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ * @load_ua:		Maximum current required from all consumers in microamps
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each VRM rpmh-regulator device.
+ *
+ * This function sets the mode of the regulator to that which has the highest
+ * minimum supported load less than or equal to load_ua.  Example:
+ *	mode_count = 3
+ *	mode[].min_load_ua = 0, 100000, 6000000
+ *
+ *	load_ua = 10000   --> mode_index = 0
+ *	load_ua = 250000  --> mode_index = 1
+ *	load_ua = 7000000 --> mode_index = 2
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_vrm_set_load(struct regulator_dev *rdev, int load_ua)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+	int i;
+
+	/* No need to check element 0 as it will be the default. */
+	for (i = vreg->aggr_vreg->mode_count - 1; i > 0; i--)
+		if (vreg->aggr_vreg->mode[i].min_load_ua <= load_ua)
+			break;
+
+	return rpmh_regulator_vrm_set_mode_index(vreg, i);
+}
+
+/**
+ * rpmh_regulator_arc_set_voltage_sel() - set the voltage level of the ARC
+ *		rpmh-regulator device
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ * @selector:		ARC voltage level to set
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each ARC rpmh-regulator device.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_arc_set_voltage_sel(struct regulator_dev *rdev,
+						unsigned int selector)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+	u32 prev_level;
+	int rc;
+
+	mutex_lock(&vreg->aggr_vreg->lock);
+
+	prev_level = rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_ARC_LEVEL,
+						selector);
+
+	rc = rpmh_regulator_send_aggregate_requests(vreg);
+	if (rc) {
+		vreg_err(vreg, "set level=%d failed, rc=%d\n",
+			vreg->req.reg[RPMH_REGULATOR_REG_ARC_LEVEL],
+			rc);
+		rpmh_regulator_set_reg(vreg, RPMH_REGULATOR_REG_ARC_LEVEL,
+					prev_level);
+	}
+
+	mutex_unlock(&vreg->aggr_vreg->lock);
+
+	return rc;
+}
+
+/**
+ * rpmh_regulator_arc_get_voltage_sel() - get the voltage level of the ARC
+ *		rpmh-regulator device
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each ARC rpmh-regulator device.
+ *
+ * Return: ARC voltage level
+ */
+static int rpmh_regulator_arc_get_voltage_sel(struct regulator_dev *rdev)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+	return vreg->req.reg[RPMH_REGULATOR_REG_ARC_LEVEL];
+}
+
+/**
+ * rpmh_regulator_arc_list_voltage() - return the consumer voltage level mapped
+ *		to a given ARC voltage level
+ * @rdev:		Regulator device pointer for the rpmh-regulator
+ * @selector:		ARC voltage level
+ *
+ * This function is passed as a callback function into the regulator ops that
+ * are registered for each ARC rpmh-regulator device.
+ *
+ * Data ranges:
+ * ARC voltage level:      0 - 15 (fixed in hardware)
+ * Consumer voltage level: 1 - 513 (could be expanded to larger values)
+ *
+ * Return: consumer voltage level
+ */
+static int rpmh_regulator_arc_list_voltage(struct regulator_dev *rdev,
+						unsigned int selector)
+{
+	struct rpmh_vreg *vreg = rdev_get_drvdata(rdev);
+
+	if (selector >= vreg->aggr_vreg->level_count)
+		return 0;
+
+	return vreg->aggr_vreg->level[selector];
+}
+
+static const struct regulator_ops rpmh_regulator_vrm_ops = {
+	.enable			= rpmh_regulator_enable,
+	.disable		= rpmh_regulator_disable,
+	.is_enabled		= rpmh_regulator_is_enabled,
+	.set_voltage		= rpmh_regulator_vrm_set_voltage,
+	.get_voltage		= rpmh_regulator_vrm_get_voltage,
+	.set_mode		= rpmh_regulator_vrm_set_mode,
+	.get_mode		= rpmh_regulator_vrm_get_mode,
+	.set_load		= rpmh_regulator_vrm_set_load,
+};
+
+static const struct regulator_ops rpmh_regulator_arc_ops = {
+	.enable			= rpmh_regulator_enable,
+	.disable		= rpmh_regulator_disable,
+	.is_enabled		= rpmh_regulator_is_enabled,
+	.set_voltage_sel	= rpmh_regulator_arc_set_voltage_sel,
+	.get_voltage_sel	= rpmh_regulator_arc_get_voltage_sel,
+	.list_voltage		= rpmh_regulator_arc_list_voltage,
+};
+
+static const struct regulator_ops rpmh_regulator_xob_ops = {
+	.enable			= rpmh_regulator_enable,
+	.disable		= rpmh_regulator_disable,
+	.is_enabled		= rpmh_regulator_is_enabled,
+};
+
+static const struct regulator_ops *rpmh_regulator_ops[] = {
+	[RPMH_REGULATOR_TYPE_VRM]	= &rpmh_regulator_vrm_ops,
+	[RPMH_REGULATOR_TYPE_ARC]	= &rpmh_regulator_arc_ops,
+	[RPMH_REGULATOR_TYPE_XOB]	= &rpmh_regulator_xob_ops,
+};
+
+/**
+ * rpmh_regulator_load_arc_level_mapping() - load the RPMh ARC resource's
+ *		voltage level mapping from command db
+ * @aggr_vreg:		Pointer to the aggregated rpmh regulator resource
+ *
+ * The set of supported RPMH_REGULATOR_LEVEL_* voltage levels (0 - ~512) that
+ * map to ARC operating levels (0 - 15) is defined in aux data per ARC resource
+ * in the command db SMEM data structure.  It is in a u16 array with 1 to 16
+ * elements.  Note that the aux data array may be zero padded at the end for
+ * data alignment purposes.  Such padding entries are invalid and must be
+ * ignored.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int
+rpmh_regulator_load_arc_level_mapping(struct rpmh_aggr_vreg *aggr_vreg)
+{
+	int i, j, len, rc;
+	u8 *buf;
+
+	len = cmd_db_read_aux_data_len(aggr_vreg->resource_name);
+	if (len < 0) {
+		aggr_vreg_err(aggr_vreg, "could not get ARC aux data len, rc=%d\n",
+			len);
+		return len;
+	} else if (len == 0) {
+		aggr_vreg_err(aggr_vreg, "ARC level mapping data missing in command db\n");
+		return -EINVAL;
+	} else if (len > RPMH_ARC_MAX_LEVELS * RPMH_ARC_LEVEL_SIZE) {
+		aggr_vreg_err(aggr_vreg, "more ARC levels defined than allowed: %d > %d\n",
+			len, RPMH_ARC_MAX_LEVELS * RPMH_ARC_LEVEL_SIZE);
+		return -EINVAL;
+	} else if (len % RPMH_ARC_LEVEL_SIZE) {
+		aggr_vreg_err(aggr_vreg, "invalid ARC aux data size: %d\n",
+			len);
+		return -EINVAL;
+	}
+
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	rc = cmd_db_read_aux_data(aggr_vreg->resource_name, buf, len);
+	if (rc < 0) {
+		aggr_vreg_err(aggr_vreg, "could not retrieve ARC aux data, rc=%d\n",
+			rc);
+		goto done;
+	} else if (rc != len) {
+		aggr_vreg_err(aggr_vreg, "could not retrieve all ARC aux data, %d != %d\n",
+			rc, len);
+		rc = -EINVAL;
+		goto done;
+	}
+	rc = 0;
+
+	aggr_vreg->level_count = len / RPMH_ARC_LEVEL_SIZE;
+
+	for (i = 0; i < aggr_vreg->level_count; i++) {
+		for (j = 0; j < RPMH_ARC_LEVEL_SIZE; j++)
+			aggr_vreg->level[i] |=
+				buf[i * RPMH_ARC_LEVEL_SIZE + j] << (8 * j);
+
+		/*
+		 * The AUX data may be zero padded.  These 0 valued entries at
+		 * the end of the map must be ignored.
+		 */
+		if (i > 0 && aggr_vreg->level[i] == 0) {
+			aggr_vreg->level_count = i;
+			break;
+		}
+
+		aggr_vreg_debug(aggr_vreg, "ARC hlvl=%2d --> vlvl=%4u\n",
+				i, aggr_vreg->level[i]);
+	}
+
+done:
+	kfree(buf);
+	return rc;
+}
+
+/**
+ * rpmh_regulator_parse_vrm_modes() - parse the supported mode configurations
+ *		for a VRM RPMh resource from device tree
+ * @aggr_vreg:		Pointer to the aggregated rpmh regulator resource
+ *
+ * This function initializes the mode[] array of aggr_vreg based upon the values
+ * of optional device tree properties.
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_parse_vrm_modes(struct rpmh_aggr_vreg *aggr_vreg)
+{
+	struct device_node *node = aggr_vreg->dev->of_node;
+	const char *type = "";
+	const struct rpmh_regulator_mode *map;
+	const char *prop;
+	int i, len, rc;
+	u32 *buf;
+
+	aggr_vreg->regulator_hw_type = RPMH_REGULATOR_HW_TYPE_UNKNOWN;
+
+	/* qcom,regulator-type is optional */
+	prop = "qcom,regulator-type";
+	if (!of_find_property(node, prop, &len))
+		return 0;
+
+	rc = of_property_read_string(node, prop, &type);
+	if (rc) {
+		aggr_vreg_err(aggr_vreg, "unable to read %s, rc=%d\n",
+				prop, rc);
+		return rc;
+	}
+
+	if (!strcmp(type, "pmic4-ldo")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_LDO;
+	} else if (!strcmp(type, "pmic4-hfsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_HFSMPS;
+	} else if (!strcmp(type, "pmic4-ftsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_FTSMPS;
+	} else if (!strcmp(type, "pmic4-bob")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC4_BOB;
+	} else if (!strcmp(type, "pmic5-ldo")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_LDO;
+	} else if (!strcmp(type, "pmic5-hfsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_HFSMPS;
+	} else if (!strcmp(type, "pmic5-ftsmps")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_FTSMPS;
+	} else if (!strcmp(type, "pmic5-bob")) {
+		aggr_vreg->regulator_hw_type
+			= RPMH_REGULATOR_HW_TYPE_PMIC5_BOB;
+	} else {
+		aggr_vreg_err(aggr_vreg, "unknown %s = %s\n",
+				prop, type);
+		return -EINVAL;
+	}
+
+	map = rpmh_regulator_mode_map[aggr_vreg->regulator_hw_type];
+
+	/* qcom,supported-modes is optional */
+	prop = "qcom,supported-modes";
+	if (!of_find_property(node, prop, &len))
+		return 0;
+
+	len /= sizeof(u32);
+	aggr_vreg->mode = devm_kcalloc(aggr_vreg->dev, len,
+					sizeof(*aggr_vreg->mode), GFP_KERNEL);
+	if (!aggr_vreg->mode)
+		return -ENOMEM;
+	aggr_vreg->mode_count = len;
+
+
+	buf = kcalloc(len, sizeof(*buf), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	rc = of_property_read_u32_array(node, prop, buf, len);
+	if (rc) {
+		aggr_vreg_err(aggr_vreg, "unable to read %s, rc=%d\n",
+			prop, rc);
+		goto done;
+	}
+
+	for (i = 0; i < len; i++) {
+		if (buf[i] >= RPMH_REGULATOR_MODE_COUNT) {
+			aggr_vreg_err(aggr_vreg, "element %d of %s = %u is invalid\n",
+				i, prop, buf[i]);
+			rc = -EINVAL;
+			goto done;
+		}
+
+		if (!map[buf[i]].framework_mode) {
+			aggr_vreg_err(aggr_vreg, "element %d of %s = %u is invalid for regulator type = %s\n",
+				i, prop, buf[i], type);
+			rc = -EINVAL;
+			goto done;
+		}
+
+		aggr_vreg->mode[i].pmic_mode = map[buf[i]].pmic_mode;
+		aggr_vreg->mode[i].framework_mode = map[buf[i]].framework_mode;
+
+		if (i > 0 && aggr_vreg->mode[i].pmic_mode
+				<= aggr_vreg->mode[i - 1].pmic_mode) {
+			aggr_vreg_err(aggr_vreg, "%s elements are not in ascending order\n",
+				prop);
+			rc = -EINVAL;
+			goto done;
+		}
+	}
+
+	prop = "qcom,mode-threshold-currents";
+
+	rc = of_property_read_u32_array(node, prop, buf, len);
+	if (rc) {
+		aggr_vreg_err(aggr_vreg, "unable to read %s, rc=%d\n",
+			prop, rc);
+		goto done;
+	}
+
+	for (i = 0; i < len; i++) {
+		aggr_vreg->mode[i].min_load_ua = buf[i];
+
+		if (i > 0 && aggr_vreg->mode[i].min_load_ua
+				<= aggr_vreg->mode[i - 1].min_load_ua) {
+			aggr_vreg_err(aggr_vreg, "%s elements are not in ascending order\n",
+				prop);
+			rc = -EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	kfree(buf);
+	return rc;
+}
+
+/**
+ * rpmh_regulator_allocate_vreg() - allocate space for the regulators associated
+ *		with the RPMh regulator resource and initialize important
+ *		pointers for each regulator
+ * @aggr_vreg:		Pointer to the aggregated rpmh regulator resource
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_allocate_vreg(struct rpmh_aggr_vreg *aggr_vreg)
+{
+	struct device_node *node;
+	int i, rc;
+
+	aggr_vreg->vreg_count = 0;
+
+	for_each_available_child_of_node(aggr_vreg->dev->of_node, node) {
+		/* Skip child nodes handled by other drivers. */
+		if (of_find_property(node, "compatible", NULL))
+			continue;
+		aggr_vreg->vreg_count++;
+	}
+
+	if (aggr_vreg->vreg_count == 0) {
+		aggr_vreg_err(aggr_vreg, "could not find any regulator subnodes\n");
+		return -ENODEV;
+	}
+
+	aggr_vreg->vreg = devm_kcalloc(aggr_vreg->dev, aggr_vreg->vreg_count,
+			sizeof(*aggr_vreg->vreg), GFP_KERNEL);
+	if (!aggr_vreg->vreg)
+		return -ENOMEM;
+
+	i = 0;
+	for_each_available_child_of_node(aggr_vreg->dev->of_node, node) {
+		/* Skip child nodes handled by other drivers. */
+		if (of_find_property(node, "compatible", NULL))
+			continue;
+
+		aggr_vreg->vreg[i].of_node = node;
+		aggr_vreg->vreg[i].aggr_vreg = aggr_vreg;
+
+		rc = of_property_read_string(node, "regulator-name",
+						&aggr_vreg->vreg[i].rdesc.name);
+		if (rc) {
+			aggr_vreg_err(aggr_vreg, "could not read regulator-name property, rc=%d\n",
+				rc);
+			return rc;
+		}
+
+		i++;
+	}
+
+	return 0;
+}
+
+/**
+ * rpmh_regulator_load_default_parameters() - initialize the RPMh resource
+ *		request for this regulator based on optional device tree
+ *		properties
+ * @vreg:		Pointer to the RPMh regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_load_default_parameters(struct rpmh_vreg *vreg)
+{
+	enum rpmh_regulator_type type = vreg->aggr_vreg->regulator_type;
+	const struct rpmh_regulator_mode *map;
+	const char *prop;
+	int i, rc;
+	u32 temp;
+
+	if (type == RPMH_REGULATOR_TYPE_ARC) {
+		prop = "qcom,init-voltage-level";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc) {
+			for (i = 0; i < vreg->aggr_vreg->level_count; i++)
+				if (temp <= vreg->aggr_vreg->level[i])
+					break;
+			if (i < vreg->aggr_vreg->level_count) {
+				rpmh_regulator_set_reg(vreg,
+					RPMH_REGULATOR_REG_ARC_LEVEL, i);
+			} else {
+				vreg_err(vreg, "%s=%u is invalid\n",
+					prop, temp);
+				return -EINVAL;
+			}
+		}
+
+		prop = "qcom,min-dropout-voltage-level";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc)
+			vreg->rdesc.min_dropout_uV = temp;
+	} else if (type == RPMH_REGULATOR_TYPE_VRM) {
+		prop = "qcom,init-enable";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc)
+			rpmh_regulator_set_reg(vreg,
+						RPMH_REGULATOR_REG_VRM_ENABLE,
+						!!temp);
+
+		prop = "qcom,init-voltage";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc) {
+			if (temp < RPMH_VRM_MIN_UV || temp > RPMH_VRM_MAX_UV) {
+				vreg_err(vreg, "%s=%u is invalid\n",
+					prop, temp);
+				return -EINVAL;
+			}
+			rpmh_regulator_set_reg(vreg,
+						RPMH_REGULATOR_REG_VRM_VOLTAGE,
+						temp / 1000);
+		}
+
+		prop = "qcom,init-mode";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc) {
+			if (temp >= RPMH_REGULATOR_MODE_COUNT) {
+				vreg_err(vreg, "%s=%u is invalid\n",
+					prop, temp);
+				return -EINVAL;
+			} else if (vreg->aggr_vreg->regulator_hw_type
+					== RPMH_REGULATOR_HW_TYPE_UNKNOWN) {
+				vreg_err(vreg, "qcom,regulator-type missing so %s cannot be used\n",
+					prop);
+				return -EINVAL;
+			}
+
+			map = rpmh_regulator_mode_map[
+					vreg->aggr_vreg->regulator_hw_type];
+			if (!map[temp].framework_mode) {
+				vreg_err(vreg, "%s=%u is not supported by type = %d\n",
+					prop, temp,
+					vreg->aggr_vreg->regulator_hw_type);
+				return -EINVAL;
+			}
+
+			rpmh_regulator_set_reg(vreg,
+						RPMH_REGULATOR_REG_VRM_MODE,
+						map[temp].pmic_mode);
+			for (i = 0; i < vreg->aggr_vreg->mode_count; i++) {
+				if (vreg->aggr_vreg->mode[i].pmic_mode
+				    == map[temp].pmic_mode) {
+					vreg->mode_index = i;
+					break;
+				}
+			}
+		}
+
+		prop = "qcom,init-headroom-voltage";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc) {
+			if (temp < RPMH_VRM_HEADROOM_MIN_UV ||
+			    temp > RPMH_VRM_HEADROOM_MAX_UV) {
+				vreg_err(vreg, "%s=%u is invalid\n",
+					prop, temp);
+				return -EINVAL;
+			}
+			rpmh_regulator_set_reg(vreg,
+						RPMH_REGULATOR_REG_VRM_HEADROOM,
+						temp / 1000);
+		}
+
+		prop = "qcom,min-dropout-voltage";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc)
+			vreg->rdesc.min_dropout_uV = temp;
+	} else if (type == RPMH_REGULATOR_TYPE_XOB) {
+		prop = "qcom,init-enable";
+		rc = of_property_read_u32(vreg->of_node, prop, &temp);
+		if (!rc)
+			rpmh_regulator_set_reg(vreg,
+						RPMH_REGULATOR_REG_XOB_ENABLE,
+						!!temp);
+	}
+
+	return 0;
+}
+
+/**
+ * rpmh_regulator_init_vreg_supply() - initialize the regulator's parent supply
+ *		mapping based on optional DT parent supply property
+ * @vreg:		Pointer to the RPMh regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_init_vreg_supply(struct rpmh_vreg *vreg)
+{
+	char *buf;
+	size_t len;
+
+	len = strlen(vreg->rdesc.name) + 16;
+	buf = kzalloc(len, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+	scnprintf(buf, len, "%s-parent-supply", vreg->rdesc.name);
+
+	if (of_find_property(vreg->aggr_vreg->dev->of_node, buf, NULL)) {
+		kfree(buf);
+
+		len = strlen(vreg->rdesc.name) + 10;
+		buf = devm_kzalloc(vreg->aggr_vreg->dev, len, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		scnprintf(buf, len, "%s-parent", vreg->rdesc.name);
+
+		vreg->rdesc.supply_name = buf;
+	} else {
+		kfree(buf);
+	}
+
+	return 0;
+}
+
+/**
+ * rpmh_regulator_init_vreg() - initialize all attributes of an rpmh-regulator
+ * @vreg:		Pointer to the RPMh regulator
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_init_vreg(struct rpmh_vreg *vreg)
+{
+	struct device *dev = vreg->aggr_vreg->dev;
+	enum rpmh_regulator_type type = vreg->aggr_vreg->regulator_type;
+	struct regulator_config reg_config = {};
+	struct regulator_init_data *init_data;
+	struct regulator_ops *ops;
+	int rc, i;
+	u32 set;
+
+	ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
+	if (!ops)
+		return -ENOMEM;
+
+	*ops			= *rpmh_regulator_ops[type];
+	vreg->rdesc.owner	= THIS_MODULE;
+	vreg->rdesc.type	= REGULATOR_VOLTAGE;
+	vreg->rdesc.ops		= ops;
+
+	init_data = of_get_regulator_init_data(dev,
+						vreg->of_node, &vreg->rdesc);
+	if (init_data == NULL)
+		return -ENOMEM;
+
+	init_data->constraints.input_uV = init_data->constraints.max_uV;
+	if (type == RPMH_REGULATOR_TYPE_VRM) {
+		init_data->constraints.min_uV
+			= max(init_data->constraints.min_uV, RPMH_VRM_MIN_UV);
+		init_data->constraints.min_uV
+			= min(init_data->constraints.min_uV, RPMH_VRM_MAX_UV);
+		init_data->constraints.max_uV
+			= max(init_data->constraints.max_uV, RPMH_VRM_MIN_UV);
+		init_data->constraints.max_uV
+			= min(init_data->constraints.max_uV, RPMH_VRM_MAX_UV);
+	}
+
+	if (ops->set_voltage || ops->set_voltage_sel)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_VOLTAGE;
+
+	if (type == RPMH_REGULATOR_TYPE_XOB
+	    && init_data->constraints.min_uV == init_data->constraints.max_uV)
+		vreg->rdesc.fixed_uV = init_data->constraints.min_uV;
+
+	if (vreg->aggr_vreg->mode_count) {
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_DRMS;
+		for (i = 0; i < vreg->aggr_vreg->mode_count; i++)
+			init_data->constraints.valid_modes_mask
+				|= vreg->aggr_vreg->mode[i].framework_mode;
+	} else {
+		ops->get_mode = NULL;
+		ops->set_mode = NULL;
+		ops->set_load = NULL;
+	}
+
+	/*
+	 * Remove enable state control if the ARC resource does not support the
+	 * off level.
+	 */
+	if (type == RPMH_REGULATOR_TYPE_ARC
+	    && vreg->aggr_vreg->level[0] != RPMH_REGULATOR_LEVEL_OFF) {
+		ops->enable = NULL;
+		ops->disable = NULL;
+		ops->is_enabled = NULL;
+	}
+	if (ops->enable)
+		init_data->constraints.valid_ops_mask
+			|= REGULATOR_CHANGE_STATUS;
+
+	switch (type) {
+	case RPMH_REGULATOR_TYPE_VRM:
+		vreg->rdesc.n_voltages = 2;
+		break;
+	case RPMH_REGULATOR_TYPE_ARC:
+		vreg->rdesc.n_voltages = vreg->aggr_vreg->level_count;
+		break;
+	case RPMH_REGULATOR_TYPE_XOB:
+		vreg->rdesc.n_voltages = 1;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rc = of_property_read_u32(vreg->of_node, "qcom,set", &set);
+	if (rc) {
+		vreg_err(vreg, "qcom,set property missing, rc=%d\n", rc);
+		return rc;
+	} else if (!(set & RPMH_REGULATOR_SET_ALL)) {
+		vreg_err(vreg, "qcom,set=%u property is invalid\n", set);
+		return -EINVAL;
+	}
+
+	vreg->set_active = !!(set & RPMH_REGULATOR_SET_ACTIVE);
+	vreg->set_sleep = !!(set & RPMH_REGULATOR_SET_SLEEP);
+
+	rc = rpmh_regulator_init_vreg_supply(vreg);
+	if (rc) {
+		vreg_err(vreg, "unable to initialize regulator supply name, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	reg_config.dev			= dev;
+	reg_config.init_data		= init_data;
+	reg_config.of_node		= vreg->of_node;
+	reg_config.driver_data		= vreg;
+
+	rc = rpmh_regulator_load_default_parameters(vreg);
+	if (rc) {
+		vreg_err(vreg, "unable to load default parameters, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	vreg->rdev = devm_regulator_register(dev, &vreg->rdesc, &reg_config);
+	if (IS_ERR(vreg->rdev)) {
+		rc = PTR_ERR(vreg->rdev);
+		vreg->rdev = NULL;
+		vreg_err(vreg, "devm_regulator_register() failed, rc=%d\n", rc);
+		return rc;
+	}
+
+	vreg_debug(vreg, "successfully registered; set=%s\n",
+		vreg->set_active && vreg->set_sleep
+			? "active + sleep"
+			: vreg->set_active ? "active" : "sleep");
+
+	return rc;
+}
+
+static const struct of_device_id rpmh_regulator_match_table[] = {
+	{
+		.compatible = "qcom,rpmh-vrm-regulator",
+		.data = (void *)(uintptr_t)RPMH_REGULATOR_TYPE_VRM,
+	},
+	{
+		.compatible = "qcom,rpmh-arc-regulator",
+		.data = (void *)(uintptr_t)RPMH_REGULATOR_TYPE_ARC,
+	},
+	{
+		.compatible = "qcom,rpmh-xob-regulator",
+		.data = (void *)(uintptr_t)RPMH_REGULATOR_TYPE_XOB,
+	},
+	{}
+};
+
+/**
+ * rpmh_regulator_probe() - probe an aggregated RPMh regulator resource and
+ *		register regulators for each of the regulator nodes associated
+ *		with it
+ * @pdev:		Pointer to the platform device of the aggregated rpmh
+ *			regulator resource
+ *
+ * Return: 0 on success, errno on failure
+ */
+static int rpmh_regulator_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	const struct of_device_id *match;
+	struct rpmh_aggr_vreg *aggr_vreg;
+	struct device_node *node;
+	int rc, i, sid;
+
+	node = dev->of_node;
+
+	if (!node) {
+		dev_err(dev, "Device tree node is missing\n");
+		return -EINVAL;
+	}
+
+	aggr_vreg = devm_kzalloc(dev, sizeof(*aggr_vreg), GFP_KERNEL);
+	if (!aggr_vreg)
+		return -ENOMEM;
+
+	aggr_vreg->dev = dev;
+	mutex_init(&aggr_vreg->lock);
+
+	match = of_match_node(rpmh_regulator_match_table, node);
+	if (match) {
+		aggr_vreg->regulator_type = (uintptr_t)match->data;
+	} else {
+		dev_err(dev, "could not find compatible string match\n");
+		return -ENODEV;
+	}
+
+	rc = of_property_read_string(node, "qcom,resource-name",
+				     &aggr_vreg->resource_name);
+	if (rc) {
+		dev_err(dev, "qcom,resource-name missing in DT node\n");
+		return rc;
+	}
+
+	aggr_vreg->addr = cmd_db_read_addr(aggr_vreg->resource_name);
+	if (!aggr_vreg->addr) {
+		aggr_vreg_err(aggr_vreg, "could not find RPMh address for resource\n");
+		return -ENODEV;
+	}
+
+	sid = cmd_db_read_slave_id(aggr_vreg->resource_name);
+	if (sid < 0) {
+		aggr_vreg_err(aggr_vreg, "could not find RPMh slave id for resource, rc=%d\n",
+			sid);
+		return sid;
+	}
+
+	/* Confirm slave ID listed in command DB matches DT configuration. */
+	if ((aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
+			&& sid != CMD_DB_HW_ARC)
+	    || (aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM
+			&& sid != CMD_DB_HW_VRM)
+	    || (aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_XOB
+			&& sid != CMD_DB_HW_XOB)) {
+		aggr_vreg_err(aggr_vreg, "RPMh slave ID mismatch; config=%d (%s) != cmd-db=%d\n",
+			aggr_vreg->regulator_type,
+			aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
+				? "ARC" : (aggr_vreg->regulator_type
+						== RPMH_REGULATOR_TYPE_VRM
+					  ? "VRM" : "XOB"),
+			sid);
+		return -EINVAL;
+	}
+
+	if (aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC) {
+		rc = rpmh_regulator_load_arc_level_mapping(aggr_vreg);
+		if (rc) {
+			aggr_vreg_err(aggr_vreg, "could not load arc level mapping, rc=%d\n",
+				rc);
+			return rc;
+		}
+	} else if (aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_VRM) {
+		rc = rpmh_regulator_parse_vrm_modes(aggr_vreg);
+		if (rc) {
+			aggr_vreg_err(aggr_vreg, "could not parse vrm mode mapping, rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
+	aggr_vreg->always_wait_for_ack
+		= of_property_read_bool(node, "qcom,always-wait-for-ack");
+
+	rc = rpmh_regulator_allocate_vreg(aggr_vreg);
+	if (rc) {
+		aggr_vreg_err(aggr_vreg, "failed to allocate regulator subnode array, rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	for (i = 0; i < aggr_vreg->vreg_count; i++) {
+		rc = rpmh_regulator_init_vreg(&aggr_vreg->vreg[i]);
+		if (rc) {
+			pr_err("unable to initialize rpmh-regulator vreg %s for resource %s, rc=%d\n",
+				aggr_vreg->vreg[i].rdesc.name,
+				aggr_vreg->resource_name, rc);
+			return rc;
+		}
+	}
+
+	if (of_property_read_bool(node, "qcom,send-defaults")) {
+		mutex_lock(&aggr_vreg->lock);
+		rc = rpmh_regulator_send_aggregate_requests(
+					&aggr_vreg->vreg[0]);
+		if (rc) {
+			aggr_vreg_err(aggr_vreg, "error while sending default request, rc=%d\n",
+				rc);
+			mutex_unlock(&aggr_vreg->lock);
+			return rc;
+		}
+		mutex_unlock(&aggr_vreg->lock);
+	}
+
+	of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+	platform_set_drvdata(pdev, aggr_vreg);
+
+	aggr_vreg_debug(aggr_vreg, "successfully probed; addr=0x%05X, type=%s\n",
+			aggr_vreg->addr,
+			aggr_vreg->regulator_type == RPMH_REGULATOR_TYPE_ARC
+				? "ARC"
+				: (aggr_vreg->regulator_type
+						== RPMH_REGULATOR_TYPE_VRM
+					? "VRM" : "XOB"));
+
+	return rc;
+}
+
+static struct platform_driver rpmh_regulator_driver = {
+	.driver = {
+		.name		= "qcom,rpmh-regulator",
+		.of_match_table	= rpmh_regulator_match_table,
+	},
+	.probe = rpmh_regulator_probe,
+};
+
+static int rpmh_regulator_init(void)
+{
+	return platform_driver_register(&rpmh_regulator_driver);
+}
+
+static void rpmh_regulator_exit(void)
+{
+	platform_driver_unregister(&rpmh_regulator_driver);
+}
+
+MODULE_DESCRIPTION("RPMh regulator driver");
+MODULE_LICENSE("GPL v2");
+
+arch_initcall(rpmh_regulator_init);
+module_exit(rpmh_regulator_exit);
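
For context, the regulators registered by this driver are consumed through the
standard Linux regulator framework. A minimal consumer sketch follows; the
device pointer, the "my-vdd" supply name, and the voltage/load values are
hypothetical and would come from the consumer's own driver and device tree.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_enable_vdd(struct device *dev)
{
	struct regulator *vdd;
	int rc;

	/* "my-vdd" is a hypothetical supply name from the consumer's DT node. */
	vdd = devm_regulator_get(dev, "my-vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	/* Reaches rpmh_regulator_vrm_set_voltage() for VRM resources. */
	rc = regulator_set_voltage(vdd, 1200000, 1304000);
	if (rc)
		return rc;

	/* Reaches rpmh_regulator_vrm_set_load(), possibly changing the mode. */
	rc = regulator_set_load(vdd, 250000);
	if (rc)
		return rc;

	/* Reaches rpmh_regulator_enable(). */
	return regulator_enable(vdd);
}
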
diff --git a/drivers/soc/qcom/subsys-pil-tz.c b/drivers/soc/qcom/subsys-pil-tz.c
index 912c8af..3249bf8 100644
--- a/drivers/soc/qcom/subsys-pil-tz.c
+++ b/drivers/soc/qcom/subsys-pil-tz.c
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 /*
- * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
  */
 
 #include <linux/kernel.h>
@@ -953,12 +953,14 @@
 	uint32_t err_value;
 
 	err_value =  __raw_readl(d->err_status);
-	pr_debug("PBL_DONE received from %s!\n", d->subsys_desc.name);
+
 	if (err_value) {
 		uint32_t rmb_err_spare0;
 		uint32_t rmb_err_spare1;
 		uint32_t rmb_err_spare2;
 
+		pr_debug("PBL_DONE received from %s!\n", d->subsys_desc.name);
+
 		rmb_err_spare2 =  __raw_readl(d->err_status_spare);
 		rmb_err_spare1 =  __raw_readl(d->err_status_spare-4);
 		rmb_err_spare0 =  __raw_readl(d->err_status_spare-8);
@@ -971,6 +973,9 @@
 			rmb_err_spare1);
 		pr_err("PBL error status spare2 register: 0x%08x\n",
 			rmb_err_spare2);
+	} else {
+		pr_info("PBL_DONE - 1st phase loading [%s] completed ok\n",
+			d->subsys_desc.name);
 	}
 	__raw_writel(BIT(d->bits_arr[PBL_DONE]), d->irq_clear);
 }
@@ -979,6 +984,10 @@
 {
 	pr_debug("Subsystem error services up received from %s\n",
 							d->subsys_desc.name);
+
+	pr_info("SW_INIT_DONE - 2nd phase loading [%s] completed ok\n",
+		d->subsys_desc.name);
+
 	__raw_writel(BIT(d->bits_arr[ERR_READY]), d->irq_clear);
 	complete_err_ready(d->subsys);
 }
diff --git a/drivers/soc/qcom/subsystem_notif.c b/drivers/soc/qcom/subsystem_notif.c
index b5c36a5..39c8e1a 100644
--- a/drivers/soc/qcom/subsystem_notif.c
+++ b/drivers/soc/qcom/subsystem_notif.c
@@ -21,10 +21,25 @@
 #include <linux/slab.h>
 #include <soc/qcom/subsystem_notif.h>
 
+/**
+ * The callbacks that are registered in this data structure as early
+ * notification callbacks will be called as soon as the SSR framework is
+ * informed that the subsystem has crashed. This means that these functions will
+ * be invoked as part of an IRQ handler, and thus, will be called in an atomic
+ * context. Therefore, functions that are registered as early notification
+ * callbacks must obey the same constraints as interrupt handlers
+ * (i.e. these functions must not sleep or block).
+ */
+struct subsys_early_notif_info {
+	spinlock_t cb_lock;
+	void (*early_notif_cb[NUM_EARLY_NOTIFS])(void *);
+	void *data[NUM_EARLY_NOTIFS];
+};
 
 struct subsys_notif_info {
 	char name[50];
 	struct srcu_notifier_head subsys_notif_rcvr_list;
+	struct subsys_early_notif_info early_notif_info;
 	struct list_head list;
 };
 
@@ -94,6 +109,105 @@
 }
 EXPORT_SYMBOL(subsys_notif_unregister_notifier);
 
+void send_early_notifications(void *early_notif_handle)
+{
+	struct subsys_early_notif_info *early_info = early_notif_handle;
+	unsigned long flags;
+	unsigned int i;
+	void (*notif_cb)(void *data);
+
+	if (!early_notif_handle)
+		return;
+
+	spin_lock_irqsave(&early_info->cb_lock, flags);
+	for (i = 0; i < NUM_EARLY_NOTIFS; i++) {
+		notif_cb = early_info->early_notif_cb[i];
+		if (notif_cb)
+			notif_cb(early_info->data[i]);
+	}
+	spin_unlock_irqrestore(&early_info->cb_lock, flags);
+}
+EXPORT_SYMBOL(send_early_notifications);
+
+static bool valid_early_notif(enum early_subsys_notif_type notif_type)
+{
+	return notif_type >= 0 && notif_type < NUM_EARLY_NOTIFS;
+}
+
+/**
+ * The early_notif_cb parameter must point to a function that conforms to the
+ * same constraints placed upon interrupt handlers, as the function will be
+ * called in an atomic context (i.e. these functions must not sleep or block).
+ */
+int subsys_register_early_notifier(const char *subsys_name,
+				   enum early_subsys_notif_type notif_type,
+				   void (*early_notif_cb)(void *), void *data)
+{
+	struct subsys_notif_info *subsys;
+	struct subsys_early_notif_info *early_notif_info;
+	unsigned long flags;
+	int rc = 0;
+
+	if (!subsys_name || !early_notif_cb || !valid_early_notif(notif_type))
+		return -EINVAL;
+
+	subsys = _notif_find_subsys(subsys_name);
+	if (!subsys)
+		return -EINVAL;
+
+	early_notif_info = &subsys->early_notif_info;
+	spin_lock_irqsave(&early_notif_info->cb_lock, flags);
+	if (early_notif_info->early_notif_cb[notif_type]) {
+		rc = -EEXIST;
+		goto out;
+	}
+	early_notif_info->early_notif_cb[notif_type] = early_notif_cb;
+	early_notif_info->data[notif_type] = data;
+out:
+	spin_unlock_irqrestore(&early_notif_info->cb_lock, flags);
+	return rc;
+}
+EXPORT_SYMBOL(subsys_register_early_notifier);
+
+int subsys_unregister_early_notifier(const char *subsys_name,
+				     enum early_subsys_notif_type notif_type)
+{
+	struct subsys_notif_info *subsys;
+	struct subsys_early_notif_info *early_notif_info;
+	unsigned long flags;
+
+	if (!subsys_name || !valid_early_notif(notif_type))
+		return -EINVAL;
+
+	subsys = _notif_find_subsys(subsys_name);
+	if (!subsys)
+		return -EINVAL;
+
+	early_notif_info = &subsys->early_notif_info;
+	spin_lock_irqsave(&early_notif_info->cb_lock, flags);
+	early_notif_info->early_notif_cb[notif_type] = NULL;
+	early_notif_info->data[notif_type] = NULL;
+	spin_unlock_irqrestore(&early_notif_info->cb_lock, flags);
+	return 0;
+}
+EXPORT_SYMBOL(subsys_unregister_early_notifier);
+
+void *subsys_get_early_notif_info(const char *subsys_name)
+{
+	struct subsys_notif_info *subsys;
+
+	if (!subsys_name)
+		return ERR_PTR(-EINVAL);
+
+	subsys = _notif_find_subsys(subsys_name);
+
+	if (!subsys)
+		return ERR_PTR(-EINVAL);
+
+	return &subsys->early_notif_info;
+}
+EXPORT_SYMBOL(subsys_get_early_notif_info);
+
 void *subsys_notif_add_subsys(const char *subsys_name)
 {
 	struct subsys_notif_info *subsys = NULL;
@@ -121,6 +235,9 @@
 
 	srcu_init_notifier_head(&subsys->subsys_notif_rcvr_list);
 
+	memset(&subsys->early_notif_info, 0,
+	       sizeof(subsys->early_notif_info));
+	spin_lock_init(&subsys->early_notif_info.cb_lock);
 	INIT_LIST_HEAD(&subsys->list);
 
 	mutex_lock(&notif_lock);
diff --git a/drivers/soc/qcom/subsystem_restart.c b/drivers/soc/qcom/subsystem_restart.c
index 3aedff1..3e91392 100644
--- a/drivers/soc/qcom/subsystem_restart.c
+++ b/drivers/soc/qcom/subsystem_restart.c
@@ -187,6 +187,7 @@
 	struct subsys_tracking track;
 
 	void *notify;
+	void *early_notify;
 	struct device dev;
 	struct module *owner;
 	int count;
@@ -1218,6 +1219,8 @@
 
 	name = dev->desc->name;
 
+	send_early_notifications(dev->early_notify);
+
 	/*
 	 * If a system reboot/shutdown is underway, ignore subsystem errors.
 	 * However, print a message so that we know that a subsystem behaved
@@ -1792,6 +1795,7 @@
 			sizeof(subsys->desc->fw_name));
 
 	subsys->notify = subsys_notif_add_subsys(desc->name);
+	subsys->early_notify = subsys_get_early_notif_info(desc->name);
 
 	snprintf(subsys->wlname, sizeof(subsys->wlname), "ssr(%s)", desc->name);
 	wakeup_source_init(&subsys->ssr_wlock, subsys->wlname);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index a4ecdaf..628f363 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -142,6 +142,7 @@
 };
 
 static const char *const state_names[] = {
+	[DRD_STATE_UNDEFINED] = "undefined",
 	[DRD_STATE_IDLE] = "idle",
 	[DRD_STATE_PERIPHERAL] = "peripheral",
 	[DRD_STATE_PERIPHERAL_SUSPEND] = "peripheral_suspend",
@@ -152,7 +153,7 @@
 const char *dwc3_drd_state_string(enum dwc3_drd_state state)
 {
 	if (state < 0 || state >= ARRAY_SIZE(state_names))
-		return "UNDEFINED";
+		return "UNKNOWN";
 
 	return state_names[state];
 }
diff --git a/include/dt-bindings/clock/qcom,gcc-lito.h b/include/dt-bindings/clock/qcom,gcc-lito.h
index b3b4b6d..25c44e6 100644
--- a/include/dt-bindings/clock/qcom,gcc-lito.h
+++ b/include/dt-bindings/clock/qcom,gcc-lito.h
@@ -16,19 +16,18 @@
 #define GCC_CFG_NOC_USB3_PRIM_AXI_CLK				9
 #define GCC_CPUSS_AHB_CLK					10
 #define GCC_CPUSS_AHB_CLK_SRC					11
-#define GCC_CPUSS_GNOC_CLK					12
-#define GCC_CPUSS_RBCPR_CLK					13
-#define GCC_DDRSS_GPU_AXI_CLK					14
-#define GCC_DISP_AHB_CLK					15
-#define GCC_DISP_GPLL0_CLK_SRC					16
-#define GCC_DISP_HF_AXI_CLK					17
-#define GCC_DISP_SF_AXI_CLK					18
-#define GCC_DISP_THROTTLE_HF_AXI_CLK				19
-#define GCC_DISP_THROTTLE_SF_AXI_CLK				20
-#define GCC_DISP_XO_CLK						21
-#define GCC_DPM_AHB_CLK						22
-#define GCC_DPM_CLK						23
-#define GCC_DPM_CLK_SRC						24
+#define GCC_CPUSS_RBCPR_CLK					12
+#define GCC_DDRSS_GPU_AXI_CLK					13
+#define GCC_DISP_AHB_CLK					14
+#define GCC_DISP_GPLL0_CLK_SRC					15
+#define GCC_DISP_HF_AXI_CLK					16
+#define GCC_DISP_SF_AXI_CLK					17
+#define GCC_DISP_THROTTLE_HF_AXI_CLK				18
+#define GCC_DISP_THROTTLE_SF_AXI_CLK				19
+#define GCC_DISP_XO_CLK						20
+#define GCC_DPM_AHB_CLK						21
+#define GCC_DPM_CLK						22
+#define GCC_DPM_CLK_SRC						23
 #define GCC_GP1_CLK						25
 #define GCC_GP1_CLK_SRC						26
 #define GCC_GP2_CLK						27
@@ -47,90 +46,97 @@
 #define GCC_NPU_BWMON_CFG_AHB_CLK				40
 #define GCC_NPU_CFG_AHB_CLK					41
 #define GCC_NPU_DMA_CLK						42
-#define GCC_NPU_GPLL0_CLK_SRC					43
-#define GCC_NPU_GPLL0_DIV_CLK_SRC				44
-#define GCC_PDM2_CLK						45
-#define GCC_PDM2_CLK_SRC					46
-#define GCC_PDM_AHB_CLK						47
-#define GCC_PDM_XO4_CLK						48
-#define GCC_PRNG_AHB_CLK					49
-#define GCC_QMIP_CAMERA_NRT_AHB_CLK				50
-#define GCC_QMIP_CAMERA_RT_AHB_CLK				51
-#define GCC_QMIP_DISP_AHB_CLK					52
-#define GCC_QMIP_RT_DISP_AHB_CLK				53
-#define GCC_QMIP_VIDEO_CVP_AHB_CLK				54
-#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK				55
-#define GCC_QUPV3_WRAP0_S0_CLK					56
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				57
-#define GCC_QUPV3_WRAP0_S1_CLK					58
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				59
-#define GCC_QUPV3_WRAP0_S2_CLK					60
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				61
-#define GCC_QUPV3_WRAP0_S3_CLK					62
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				63
-#define GCC_QUPV3_WRAP0_S4_CLK					64
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				65
-#define GCC_QUPV3_WRAP0_S5_CLK					66
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				67
-#define GCC_QUPV3_WRAP1_S0_CLK					68
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				69
-#define GCC_QUPV3_WRAP1_S1_CLK					70
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				71
-#define GCC_QUPV3_WRAP1_S2_CLK					72
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				73
-#define GCC_QUPV3_WRAP1_S3_CLK					74
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				75
-#define GCC_QUPV3_WRAP1_S4_CLK					76
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				77
-#define GCC_QUPV3_WRAP1_S5_CLK					78
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				79
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				80
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				81
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				82
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				83
-#define GCC_SDCC1_AHB_CLK					84
-#define GCC_SDCC1_APPS_CLK					85
-#define GCC_SDCC1_APPS_CLK_SRC					86
-#define GCC_SDCC1_ICE_CORE_CLK					87
-#define GCC_SDCC1_ICE_CORE_CLK_SRC				88
-#define GCC_SDCC2_AHB_CLK					89
-#define GCC_SDCC2_APPS_CLK					90
-#define GCC_SDCC2_APPS_CLK_SRC					91
-#define GCC_SDCC4_AHB_CLK					92
-#define GCC_SDCC4_APPS_CLK					93
-#define GCC_SDCC4_APPS_CLK_SRC					94
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				95
-#define GCC_UFS_1X_CLKREF_CLK					96
-#define GCC_UFS_PHY_AHB_CLK					97
-#define GCC_UFS_PHY_AXI_CLK					98
-#define GCC_UFS_PHY_AXI_CLK_SRC					99
-#define GCC_UFS_PHY_ICE_CORE_CLK				100
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				101
-#define GCC_UFS_PHY_PHY_AUX_CLK					102
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				103
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				104
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				105
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				106
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				107
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				108
-#define GCC_USB30_PRIM_MASTER_CLK				109
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				110
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				111
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			112
-#define GCC_USB30_PRIM_SLEEP_CLK				113
-#define GCC_USB3_PRIM_PHY_AUX_CLK				114
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				115
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				116
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				117
-#define GCC_VIDEO_AHB_CLK					118
-#define GCC_VIDEO_AXI_CLK					119
-#define GCC_VIDEO_THROTTLE1_AXI_CLK				120
-#define GCC_VIDEO_THROTTLE_AXI_CLK				121
-#define GCC_VIDEO_XO_CLK					122
-#define GCC_AGGRE_UFS_PHY_AXI_CLK				123
-#define GCC_AGGRE_USB3_PRIM_AXI_CLK				124
-#define GCC_BOOT_ROM_AHB_CLK					125
-#define GCC_CAMERA_AHB_CLK					126
+#define GCC_NPU_DMA_CLK_SRC					43
+#define GCC_NPU_GPLL0_CLK_SRC					44
+#define GCC_NPU_GPLL0_DIV_CLK_SRC				45
+#define GCC_PDM2_CLK						46
+#define GCC_PDM2_CLK_SRC					47
+#define GCC_PDM_AHB_CLK						48
+#define GCC_PDM_XO4_CLK						49
+#define GCC_PRNG_AHB_CLK					50
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK				51
+#define GCC_QMIP_CAMERA_RT_AHB_CLK				52
+#define GCC_QMIP_DISP_AHB_CLK					53
+#define GCC_QMIP_RT_DISP_AHB_CLK				54
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK				55
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK				56
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK				57
+#define GCC_QUPV3_WRAP0_CORE_CLK				58
+#define GCC_QUPV3_WRAP0_S0_CLK					59
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				60
+#define GCC_QUPV3_WRAP0_S1_CLK					61
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				62
+#define GCC_QUPV3_WRAP0_S2_CLK					63
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				64
+#define GCC_QUPV3_WRAP0_S3_CLK					65
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				66
+#define GCC_QUPV3_WRAP0_S4_CLK					67
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				68
+#define GCC_QUPV3_WRAP0_S5_CLK					69
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				70
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK				71
+#define GCC_QUPV3_WRAP1_CORE_CLK				72
+#define GCC_QUPV3_WRAP1_S0_CLK					73
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				74
+#define GCC_QUPV3_WRAP1_S1_CLK					75
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				76
+#define GCC_QUPV3_WRAP1_S2_CLK					77
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				78
+#define GCC_QUPV3_WRAP1_S3_CLK					79
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				80
+#define GCC_QUPV3_WRAP1_S4_CLK					81
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				82
+#define GCC_QUPV3_WRAP1_S5_CLK					83
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				84
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				85
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				86
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				87
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				88
+#define GCC_SDCC1_AHB_CLK					89
+#define GCC_SDCC1_APPS_CLK					90
+#define GCC_SDCC1_APPS_CLK_SRC					91
+#define GCC_SDCC1_ICE_CORE_CLK					92
+#define GCC_SDCC1_ICE_CORE_CLK_SRC				93
+#define GCC_SDCC2_AHB_CLK					94
+#define GCC_SDCC2_APPS_CLK					95
+#define GCC_SDCC2_APPS_CLK_SRC					96
+#define GCC_SDCC4_AHB_CLK					97
+#define GCC_SDCC4_APPS_CLK					98
+#define GCC_SDCC4_APPS_CLK_SRC					99
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				100
+#define GCC_UFS_1X_CLKREF_CLK					101
+#define GCC_UFS_PHY_AHB_CLK					102
+#define GCC_UFS_PHY_AXI_CLK					103
+#define GCC_UFS_PHY_AXI_CLK_SRC					104
+#define GCC_UFS_PHY_ICE_CORE_CLK				105
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				106
+#define GCC_UFS_PHY_PHY_AUX_CLK					107
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				108
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				109
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				110
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				111
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				112
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				113
+#define GCC_USB30_PRIM_MASTER_CLK				114
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				115
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				116
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			117
+#define GCC_USB30_PRIM_SLEEP_CLK				118
+#define GCC_USB3_PRIM_CLKREF_CLK				119
+#define GCC_USB3_PRIM_PHY_AUX_CLK				120
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				121
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				122
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				123
+#define GCC_VIDEO_AHB_CLK					124
+#define GCC_VIDEO_AXI_CLK					125
+#define GCC_VIDEO_THROTTLE1_AXI_CLK				126
+#define GCC_VIDEO_THROTTLE_AXI_CLK				127
+#define GCC_VIDEO_XO_CLK					128
+#define GCC_AGGRE_UFS_PHY_AXI_CLK				129
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK				130
+#define GCC_BOOT_ROM_AHB_CLK					131
+#define GCC_CAMERA_AHB_CLK					132
+#define GCC_CPUSS_GNOC_CLK					133
 
 #define GCC_DPM_BCR						0
 #define GCC_GPU_BCR						1
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h b/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
index 25d4411..65c9644 100644
--- a/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator-levels.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0-only */
 /*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
  */
 
 #ifndef __QCOM_RPMH_REGULATOR_LEVELS_H
@@ -19,4 +19,45 @@
 #define RPMH_REGULATOR_LEVEL_TURBO_L1	416
 #define RPMH_REGULATOR_LEVEL_MAX	65535
 
+/*
+ * These set constants may be used as the value for qcom,set of an RPMh
+ * resource device.
+ */
+#define RPMH_REGULATOR_SET_ACTIVE	1
+#define RPMH_REGULATOR_SET_SLEEP	2
+#define RPMH_REGULATOR_SET_ALL		3
+
+/*
+ * These mode constants may be used for qcom,supported-modes and qcom,init-mode
+ * properties of an RPMh resource.  Each type of regulator supports a subset of
+ * the possible modes.
+ *
+ * %RPMH_REGULATOR_MODE_PASS:	Pass-through mode in which output is directly
+ *				tied to input.  This mode is only supported by
+ *				BOB type regulators.
+ * %RPMH_REGULATOR_MODE_RET:	Retention mode in which only an extremely small
+ *				load current is allowed.  This mode is supported
+ *				by LDO and SMPS type regulators.
+ * %RPMH_REGULATOR_MODE_LPM:	Low power mode in which a small load current is
+ *				allowed.  This mode corresponds to PFM for SMPS
+ *				and BOB type regulators.  This mode is supported
+ *				by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type
+ *				regulators.
+ * %RPMH_REGULATOR_MODE_AUTO:	Auto mode in which the regulator hardware
+ *				automatically switches between LPM and HPM based
+ *				upon the real-time load current.  This mode is
+ *				supported by HFSMPS, BOB, and PMIC4 FTSMPS type
+ *				regulators.
+ * %RPMH_REGULATOR_MODE_HPM:	High power mode in which the full rated current
+ *				of the regulator is allowed.  This mode
+ *				corresponds to PWM for SMPS and BOB type
+ *				regulators.  This mode is supported by all types
+ *				of regulators.
+ */
+#define RPMH_REGULATOR_MODE_PASS	0
+#define RPMH_REGULATOR_MODE_RET		1
+#define RPMH_REGULATOR_MODE_LPM		2
+#define RPMH_REGULATOR_MODE_AUTO	3
+#define RPMH_REGULATOR_MODE_HPM		4
+
 #endif
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 606b706..56ad38f 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -398,6 +398,7 @@
  *	             callback for RE3 using GSI_CHAN_EVT_EOT
  *
  * @err_cb:          error notification callback
+ * @cleanup_cb:      cleanup rx-pkt/skb callback
  * @chan_user_data:  cookie used for notifications
  *
  * All the callbacks are in interrupt context
@@ -420,6 +421,7 @@
 	uint8_t empty_lvl_threshold;
 	void (*xfer_cb)(struct gsi_chan_xfer_notify *notify);
 	void (*err_cb)(struct gsi_chan_err_notify *notify);
+	void (*cleanup_cb)(void *chan_user_data, void *xfer_user_data);
 	void *chan_user_data;
 };
 
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 17ae4d8..4377410 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -161,6 +161,9 @@
 extern int mod_timer(struct timer_list *timer, unsigned long expires);
 extern int mod_timer_pending(struct timer_list *timer, unsigned long expires);
 extern int timer_reduce(struct timer_list *timer, unsigned long expires);
+#ifdef CONFIG_SMP
+extern bool check_pending_deferrable_timers(int cpu);
+#endif
 
 /*
  * The jiffies value which is added to now, when there is no timer
diff --git a/include/soc/qcom/subsystem_notif.h b/include/soc/qcom/subsystem_notif.h
index 729f0e6..7d1bbbb 100644
--- a/include/soc/qcom/subsystem_notif.h
+++ b/include/soc/qcom/subsystem_notif.h
@@ -25,6 +25,11 @@
 	SUBSYS_NOTIF_TYPE_COUNT
 };
 
+enum early_subsys_notif_type {
+	XPORT_LAYER_NOTIF,
+	NUM_EARLY_NOTIFS
+};
+
 #if defined(CONFIG_MSM_SUBSYSTEM_RESTART)
 /* Use the subsys_notif_register_notifier API to register for notifications for
  * a particular subsystem. This API will return a handle that can be used to
@@ -50,6 +55,14 @@
 int subsys_notif_queue_notification(void *subsys_handle,
 					enum subsys_notif_type notif_type,
 					void *data);
+void *subsys_get_early_notif_info(const char *subsys_name);
+int subsys_register_early_notifier(const char *subsys_name,
+				   enum early_subsys_notif_type notif_type,
+				   void (*early_notif_cb)(void *),
+				   void *data);
+int subsys_unregister_early_notifier(const char *subsys_name,
+				     enum early_subsys_notif_type notif_type);
+void send_early_notifications(void *early_notif_handle);
 #else
 
 static inline void *subsys_notif_register_notifier(
@@ -75,6 +88,30 @@
 {
 	return 0;
 }
+
+static inline void *subsys_get_early_notif_info(const char *subsys_name)
+{
+	return NULL;
+}
+
+static inline int subsys_register_early_notifier(const char *subsys_name,
+				   enum early_subsys_notif_type notif_type,
+				   void (*early_notif_cb)(void *),
+				   void *data)
+{
+	return -ENOTSUPP;
+}
+
+static inline int subsys_unregister_early_notifier(const char *subsys_name,
+						   enum early_subsys_notif_type
+						   notif_type)
+{
+	return -ENOTSUPP;
+}
+
+static inline void send_early_notifications(void *early_notif_handle)
+{
+}
 #endif /* CONFIG_MSM_SUBSYSTEM_RESTART */
 
 #endif
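
As the comments above note, early notification callbacks run in atomic (IRQ)
context. A minimal registration sketch using the API declared above is shown
here; the "modem" subsystem name and the callback body are assumptions for
illustration only.

#include <linux/printk.h>
#include <soc/qcom/subsystem_notif.h>

/* Runs in atomic context: must not sleep, block, or take mutexes. */
static void example_early_notif_cb(void *data)
{
	pr_info("early notification: subsystem crash detected\n");
}

static int example_register_early_notif(void)
{
	/* "modem" is a hypothetical subsystem name. */
	return subsys_register_early_notifier("modem", XPORT_LAYER_NOTIF,
					      example_early_notif_cb, NULL);
}
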
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 0e71980..21b6acf 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -571,6 +571,60 @@
 
 	TP_ARGS(name, type, new_value)
 );
+
+TRACE_EVENT(sugov_util_update,
+	    TP_PROTO(int cpu,
+		     unsigned long util, unsigned long avg_cap,
+		     unsigned long max_cap, unsigned long nl, unsigned long pl,
+		     unsigned int flags),
+	    TP_ARGS(cpu, util, avg_cap, max_cap, nl, pl, flags),
+	    TP_STRUCT__entry(
+		    __field(int, cpu)
+		    __field(unsigned long, util)
+		    __field(unsigned long, avg_cap)
+		    __field(unsigned long, max_cap)
+		    __field(unsigned long, nl)
+		    __field(unsigned long, pl)
+		    __field(unsigned int, flags)
+	    ),
+	    TP_fast_assign(
+		    __entry->cpu = cpu;
+		    __entry->util = util;
+		    __entry->avg_cap = avg_cap;
+		    __entry->max_cap = max_cap;
+		    __entry->nl = nl;
+		    __entry->pl = pl;
+		    __entry->flags = flags;
+	    ),
+	    TP_printk("cpu=%d util=%lu avg_cap=%lu max_cap=%lu nl=%lu pl=%lu flags=0x%x",
+		      __entry->cpu, __entry->util, __entry->avg_cap,
+		      __entry->max_cap, __entry->nl,
+		      __entry->pl, __entry->flags)
+);
+
+TRACE_EVENT(sugov_next_freq,
+	    TP_PROTO(unsigned int cpu, unsigned long util, unsigned long max,
+		     unsigned int freq),
+	    TP_ARGS(cpu, util, max, freq),
+	    TP_STRUCT__entry(
+		    __field(unsigned int, cpu)
+		    __field(unsigned long, util)
+		    __field(unsigned long, max)
+		    __field(unsigned int, freq)
+	    ),
+	    TP_fast_assign(
+		    __entry->cpu = cpu;
+		    __entry->util = util;
+		    __entry->max = max;
+		    __entry->freq = freq;
+	    ),
+	    TP_printk("cpu=%u util=%lu max=%lu freq=%u",
+		      __entry->cpu,
+		      __entry->util,
+		      __entry->max,
+		      __entry->freq)
+);
+
 #endif /* _TRACE_POWER_H */
 
 /* This part must be outside protection */
diff --git a/include/uapi/linux/spcom.h b/include/uapi/linux/spcom.h
new file mode 100644
index 0000000..5416e01
--- /dev/null
+++ b/include/uapi/linux/spcom.h
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _UAPI_SPCOM_H_
+#define _UAPI_SPCOM_H_
+
+#include <linux/types.h>	/* uint32_t, bool */
+#ifndef BIT
+	#define BIT(x) (1 << (x))
+#endif
+
+#ifndef PAGE_SIZE
+	#define PAGE_SIZE 4096
+#endif
+
+/**
+ * @brief - Secure Processor Communication interface to user space spcomlib.
+ *
+ * Sending data and control commands by write() file operation.
+ * Receiving data by read() file operation.
+ * Getting the next request size by read() file operation,
+ * with special size SPCOM_GET_NEXT_REQUEST_SIZE.
+ */
+
+/*
+ * Maximum number of channels between the Secure Processor and HLOS,
+ * including predefined channels such as "sp_kernel".
+ */
+#define SPCOM_MAX_CHANNELS	0x20
+
+/* Maximum size (including null) for channel names */
+#define SPCOM_CHANNEL_NAME_SIZE		32
+/*
+ * A file read(fd, buf, size) with this size hints to the kernel that
+ * user space wants to read the next-request-size.
+ * This size is bigger than both SPCOM_MAX_REQUEST_SIZE and
+ * SPCOM_MAX_RESPONSE_SIZE, so it is never a valid data size.
+ */
+#define SPCOM_GET_NEXT_REQUEST_SIZE	(PAGE_SIZE-1)
+
+/* Command Id between spcomlib and spcom driver, on write() */
+enum spcom_cmd_id {
+	SPCOM_CMD_LOAD_APP	= 0x4C4F4144, /* "LOAD" = 0x4C4F4144 */
+	SPCOM_CMD_RESET_SP	= 0x52455354, /* "REST" = 0x52455354 */
+	SPCOM_CMD_SEND		= 0x53454E44, /* "SEND" = 0x53454E44 */
+	SPCOM_CMD_SEND_MODIFIED	= 0x534E444D, /* "SNDM" = 0x534E444D */
+	SPCOM_CMD_LOCK_ION_BUF  = 0x4C4F434B, /* "LOCK" = 0x4C4F434B */
+	SPCOM_CMD_UNLOCK_ION_BUF = 0x554C434B, /* "ULCK" = 0x554C434B */
+	SPCOM_CMD_FSSR		= 0x46535352, /* "FSSR" = 0x46535352 */
+	SPCOM_CMD_CREATE_CHANNEL = 0x43524554, /* "CRET" = 0x43524554 */
+};
+
+/*
+ * @note: Event types that are always implicitly polled:
+ * POLLERR=0x08 | POLLHUP=0x10 | POLLNVAL=0x20
+ * so bits 3,4,5 can't be used
+ */
+enum spcom_poll_events {
+	SPCOM_POLL_LINK_STATE	= BIT(1),
+	SPCOM_POLL_CH_CONNECT	= BIT(2),
+	SPCOM_POLL_READY_FLAG	= BIT(14), /* output */
+	SPCOM_POLL_WAIT_FLAG	= BIT(15), /* if set, wait for the event */
+};
+
+/* Common Command structure between User Space and spcom driver, on write() */
+struct spcom_user_command {
+	enum spcom_cmd_id cmd_id;
+	uint32_t arg;
+} __packed;
+
+/* Command structure between User Space and spcom driver, on write() */
+struct spcom_send_command {
+	enum spcom_cmd_id cmd_id;
+	uint32_t timeout_msec;
+	uint32_t buf_size;
+	char buf[0]; /* Variable buffer size - must be last field */
+} __packed;
+
+/* Command structure between userspace spcomlib and spcom driver, on write() */
+struct spcom_user_create_channel_command {
+	enum spcom_cmd_id cmd_id;
+	char ch_name[SPCOM_CHANNEL_NAME_SIZE];
+} __packed;
+
+/* maximum number of ION buffers for the send-modified command */
+#define SPCOM_MAX_ION_BUF 4
+
+struct spcom_ion_info {
+	int32_t fd; /* ION buffer File Descriptor, set -1 for invalid fd */
+	uint32_t buf_offset; /* virtual address offset in request/response */
+};
+
+/* Pass this FD to unlock all ION buffers for the specific channel */
+#define SPCOM_ION_FD_UNLOCK_ALL	0xFFFF
+
+struct spcom_ion_handle {
+	int32_t fd;		/* File Descriptor associated with the buffer */
+};
+
+/* Command structure between User Space and spcom driver, on write() */
+struct spcom_user_send_modified_command {
+	enum spcom_cmd_id cmd_id;
+	struct spcom_ion_info ion_info[SPCOM_MAX_ION_BUF];
+	uint32_t timeout_msec;
+	uint32_t buf_size;
+	char buf[0]; /* Variable buffer size - must be last field */
+} __packed;
+
+
+#endif /* _UAPI_SPCOM_H_ */
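As a rough orientation to the interface declared above, here is a minimal user-space sketch of the create-channel and send flow. The /dev node paths, the channel name, and how the next-request-size read reports its result are assumptions (the header only defines the magic read size), error handling is trimmed, and __packed is defined locally in case the toolchain does not provide it.

/* Hypothetical spcomlib-style client; paths and flow are illustrative. */
#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#include <linux/spcom.h>

int main(void)
{
	struct spcom_user_create_channel_command create = {
		.cmd_id = SPCOM_CMD_CREATE_CHANNEL,
	};
	char tx[sizeof(struct spcom_send_command) + 16] = {0};
	struct spcom_send_command *send = (struct spcom_send_command *)tx;
	char rx[SPCOM_GET_NEXT_REQUEST_SIZE];
	int ctrl_fd, ch_fd;

	/* Channel management goes through a control node (assumed path). */
	ctrl_fd = open("/dev/spcom", O_RDWR);
	if (ctrl_fd < 0)
		return 1;
	strncpy(create.ch_name, "sp_kernel", SPCOM_CHANNEL_NAME_SIZE - 1);
	write(ctrl_fd, &create, sizeof(create));

	/* Data goes through a per-channel node (assumed path). */
	ch_fd = open("/dev/sp_kernel", O_RDWR);
	if (ch_fd < 0)
		return 1;

	/* Send: an SPCOM_CMD_SEND header followed by the payload. */
	send->cmd_id = SPCOM_CMD_SEND;
	send->timeout_msec = 1000;
	send->buf_size = 16;
	memcpy(send->buf, "ping", 5);
	write(ch_fd, tx, sizeof(tx));

	/*
	 * Reading with the magic size asks for the size of the next pending
	 * request; how the driver returns it is not defined by this header.
	 */
	read(ch_fd, rx, SPCOM_GET_NEXT_REQUEST_SIZE);

	close(ch_fd);
	close(ctrl_fd);
	return 0;
}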
diff --git a/include/uapi/media/cam_defs.h b/include/uapi/media/cam_defs.h
index 73d2c89..86fe3ec 100644
--- a/include/uapi/media/cam_defs.h
+++ b/include/uapi/media/cam_defs.h
@@ -169,7 +169,9 @@
 #define CAM_FORMAT_YV12                         44
 #define CAM_FORMAT_Y_ONLY                       45
 #define CAM_FORMAT_DPCM_12_10_12                46
-#define CAM_FORMAT_MAX                          47
+#define CAM_FORMAT_PLAIN32                      47
+#define CAM_FORMAT_ARGB_16                      48
+#define CAM_FORMAT_MAX                          49
 
 /* camera rotation */
 #define CAM_ROTATE_CW_0_DEGREE                  0
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index f1841ce..4eec9d4 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -15,18 +15,28 @@
 
 #include <linux/sched/cpufreq.h>
 #include <trace/events/power.h>
+#include <linux/sched/sysctl.h>
 
 struct sugov_tunables {
 	struct gov_attr_set	attr_set;
 	unsigned int		up_rate_limit_us;
 	unsigned int		down_rate_limit_us;
+	unsigned int		hispeed_load;
+	unsigned int		hispeed_freq;
+	bool			pl;
 };
 
 struct sugov_policy {
 	struct cpufreq_policy	*policy;
 
+	u64 last_ws;
+	u64 curr_cycles;
+	u64 last_cyc_update_time;
+	unsigned long avg_cap;
 	struct sugov_tunables	*tunables;
 	struct list_head	tunables_hook;
+	unsigned long hispeed_util;
+	unsigned long max;
 
 	raw_spinlock_t		update_lock;	/* For shared policies */
 	u64			last_freq_update_time;
@@ -57,6 +67,11 @@
 	unsigned int		iowait_boost_max;
 	u64			last_update;
 
+	struct sched_walt_cpu_load walt_load;
+
+	unsigned long util;
+	unsigned int flags;
+
 	unsigned long		bw_dl;
 	unsigned long		max;
 
@@ -67,6 +82,8 @@
 };
 
 static DEFINE_PER_CPU(struct sugov_cpu, sugov_cpu);
+static unsigned int stale_ns;
+static DEFINE_PER_CPU(struct sugov_tunables *, cached_tunables);
 
 /************************ Governor internals ***********************/
 
@@ -107,6 +124,15 @@
 	return delta_ns >= sg_policy->min_rate_limit_ns;
 }
 
+static inline bool use_pelt(void)
+{
+#ifdef CONFIG_SCHED_WALT
+	return (!sysctl_sched_use_walt_cpu_util || walt_disabled);
+#else
+	return true;
+#endif
+}
+
 static bool sugov_up_down_rate_limit(struct sugov_policy *sg_policy, u64 time,
 				     unsigned int next_freq)
 {
@@ -140,20 +166,77 @@
 	return true;
 }
 
+static unsigned long freq_to_util(struct sugov_policy *sg_policy,
+				  unsigned int freq)
+{
+	return mult_frac(sg_policy->max, freq,
+			 sg_policy->policy->cpuinfo.max_freq);
+}
+
+#define KHZ 1000
+static void sugov_track_cycles(struct sugov_policy *sg_policy,
+				unsigned int prev_freq,
+				u64 upto)
+{
+	u64 delta_ns, cycles;
+
+	if (unlikely(!sysctl_sched_use_walt_cpu_util))
+		return;
+
+	/* Track cycles in current window */
+	delta_ns = upto - sg_policy->last_cyc_update_time;
+	delta_ns *= prev_freq;
+	do_div(delta_ns, (NSEC_PER_SEC / KHZ));
+	cycles = delta_ns;
+	sg_policy->curr_cycles += cycles;
+	sg_policy->last_cyc_update_time = upto;
+}
+
+static void sugov_calc_avg_cap(struct sugov_policy *sg_policy, u64 curr_ws,
+				unsigned int prev_freq)
+{
+	u64 last_ws = sg_policy->last_ws;
+	unsigned int avg_freq;
+
+	if (unlikely(!sysctl_sched_use_walt_cpu_util))
+		return;
+
+	BUG_ON(curr_ws < last_ws);
+	if (curr_ws <= last_ws)
+		return;
+
+	/* If we skipped some windows */
+	if (curr_ws > (last_ws + sched_ravg_window)) {
+		avg_freq = prev_freq;
+		/* Reset tracking history */
+		sg_policy->last_cyc_update_time = curr_ws;
+	} else {
+		sugov_track_cycles(sg_policy, prev_freq, curr_ws);
+		avg_freq = sg_policy->curr_cycles;
+		avg_freq /= sched_ravg_window / (NSEC_PER_SEC / KHZ);
+	}
+	sg_policy->avg_cap = freq_to_util(sg_policy, avg_freq);
+	sg_policy->curr_cycles = 0;
+	sg_policy->last_ws = curr_ws;
+}
+
 static void sugov_fast_switch(struct sugov_policy *sg_policy, u64 time,
 			      unsigned int next_freq)
 {
 	struct cpufreq_policy *policy = sg_policy->policy;
+	unsigned int cpu;
 
 	if (!sugov_update_next_freq(sg_policy, time, next_freq))
 		return;
 
+	sugov_track_cycles(sg_policy, sg_policy->policy->cur, time);
 	next_freq = cpufreq_driver_fast_switch(policy, next_freq);
 	if (!next_freq)
 		return;
 
 	policy->cur = next_freq;
-	trace_cpu_frequency(next_freq, smp_processor_id());
+	for_each_cpu(cpu, policy->cpus)
+		trace_cpu_frequency(next_freq, cpu);
 }
 
 static void sugov_deferred_update(struct sugov_policy *sg_policy, u64 time,
@@ -162,12 +245,12 @@
 	if (!sugov_update_next_freq(sg_policy, time, next_freq))
 		return;
 
-	if (!sg_policy->work_in_progress) {
+	if (use_pelt())
 		sg_policy->work_in_progress = true;
-		irq_work_queue(&sg_policy->irq_work);
-	}
+	irq_work_queue(&sg_policy->irq_work);
 }
 
+#define TARGET_LOAD 80
 /**
  * get_next_freq - Compute a new frequency for a given cpufreq policy.
  * @sg_policy: schedutil policy object to compute the new frequency for.
@@ -198,6 +281,7 @@
 				policy->cpuinfo.max_freq : policy->cur;
 
 	freq = map_util_freq(util, freq, max);
+	trace_sugov_next_freq(policy->cpu, util, max, freq);
 
 	if (freq == sg_policy->cached_raw_freq && !sg_policy->need_freq_update)
 		return sg_policy->next_freq;
@@ -313,13 +397,21 @@
 static unsigned long sugov_get_util(struct sugov_cpu *sg_cpu)
 {
 	struct rq *rq = cpu_rq(sg_cpu->cpu);
-	unsigned long util = boosted_cpu_util(sg_cpu->cpu, cpu_util_rt(rq));
 	unsigned long max = arch_scale_cpu_capacity(NULL, sg_cpu->cpu);
+#ifdef CONFIG_SCHED_WALT
+
+	sg_cpu->max = max;
+	sg_cpu->bw_dl = cpu_bw_dl(rq);
+
+	return boosted_cpu_util(sg_cpu->cpu, 0, &sg_cpu->walt_load);
+#else
+	unsigned long util = boosted_cpu_util(sg_cpu->cpu, cpu_util_rt(rq));
 
 	sg_cpu->max = max;
 	sg_cpu->bw_dl = cpu_bw_dl(rq);
 
 	return schedutil_freq_util(sg_cpu->cpu, util, max, FREQUENCY_UTIL);
+#endif
 }
 
 /**
@@ -468,6 +560,34 @@
 static inline bool sugov_cpu_is_busy(struct sugov_cpu *sg_cpu) { return false; }
 #endif /* CONFIG_NO_HZ_COMMON */
 
+#define NL_RATIO 75
+#define DEFAULT_HISPEED_LOAD 90
+static void sugov_walt_adjust(struct sugov_cpu *sg_cpu, unsigned long *util,
+			      unsigned long *max)
+{
+	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+	bool is_migration = sg_cpu->flags & SCHED_CPUFREQ_INTERCLUSTER_MIG;
+	unsigned long nl = sg_cpu->walt_load.nl;
+	unsigned long cpu_util = sg_cpu->util;
+	bool is_hiload;
+
+	if (unlikely(!sysctl_sched_use_walt_cpu_util))
+		return;
+
+	is_hiload = (cpu_util >= mult_frac(sg_policy->avg_cap,
+					   sg_policy->tunables->hispeed_load,
+					   100));
+
+	if (is_hiload && !is_migration)
+		*util = max(*util, sg_policy->hispeed_util);
+
+	if (is_hiload && nl >= mult_frac(cpu_util, NL_RATIO, 100))
+		*util = *max;
+
+	if (sg_policy->tunables->pl)
+		*util = max(*util, sg_cpu->walt_load.pl);
+}
+
 /*
  * Make sugov_should_update_freq() ignore the rate limit when DL
  * has increased the utilization.
@@ -483,10 +603,13 @@
 {
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
-	unsigned long util, max;
+	unsigned long util, max, hs_util;
 	unsigned int next_f;
 	bool busy;
 
+	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
+		return;
+
 	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
 
@@ -495,11 +618,29 @@
 	if (!sugov_should_update_freq(sg_policy, time))
 		return;
 
-	busy = sugov_cpu_is_busy(sg_cpu);
+	busy = use_pelt() && sugov_cpu_is_busy(sg_cpu);
 
-	util = sugov_get_util(sg_cpu);
+	sg_cpu->util = util = sugov_get_util(sg_cpu);
 	max = sg_cpu->max;
+	sg_cpu->flags = flags;
+
+	if (sg_policy->max != max) {
+		sg_policy->max = max;
+		hs_util = freq_to_util(sg_policy,
+				       sg_policy->tunables->hispeed_freq);
+		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
+		sg_policy->hispeed_util = hs_util;
+	}
+
 	sugov_iowait_apply(sg_cpu, time, &util, &max);
+	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
+			   sg_policy->policy->cur);
+
+	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util,
+				sg_policy->avg_cap, max, sg_cpu->walt_load.nl,
+				sg_cpu->walt_load.pl, flags);
+
+	sugov_walt_adjust(sg_cpu, &util, &max);
 	next_f = get_next_freq(sg_policy, util, max);
 	/*
 	 * Do not reduce the frequency if the CPU has not been idle
@@ -530,21 +671,46 @@
 {
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
 	struct cpufreq_policy *policy = sg_policy->policy;
+	u64 last_freq_update_time = sg_policy->last_freq_update_time;
 	unsigned long util = 0, max = 1;
 	unsigned int j;
 
 	for_each_cpu(j, policy->cpus) {
 		struct sugov_cpu *j_sg_cpu = &per_cpu(sugov_cpu, j);
 		unsigned long j_util, j_max;
+		s64 delta_ns;
 
-		j_util = sugov_get_util(j_sg_cpu);
+		/*
+		 * If the CPU utilization was last updated before the previous
+		 * frequency update and the time elapsed between the last update
+		 * of the CPU utilization and the last frequency update is long
+		 * enough, don't take the CPU into account as it probably is
+		 * idle now (and clear iowait_boost for it).
+		 */
+		delta_ns = last_freq_update_time - j_sg_cpu->last_update;
+		if (delta_ns > stale_ns) {
+			sugov_iowait_reset(j_sg_cpu, last_freq_update_time,
+					   false);
+			continue;
+		}
+
+		/*
+		 * If the util value for all CPUs in a policy is 0, a strict >
+		 * comparison would leave max at its initial value of 1. WALT
+		 * stats can later update the aggregated util, so get_next_freq()
+		 * would compute freq = max_freq * 1.25 * (util / max) with
+		 * max == 1, leading to spurious jumps to fmax; hence >= below.
+		 */
+		j_util = j_sg_cpu->util;
 		j_max = j_sg_cpu->max;
 		sugov_iowait_apply(j_sg_cpu, time, &j_util, &j_max);
 
-		if (j_util * max > j_max * util) {
+		if (j_util * max >= j_max * util) {
 			util = j_util;
 			max = j_max;
 		}
+
+		sugov_walt_adjust(j_sg_cpu, &util, &max);
 	}
 
 	return get_next_freq(sg_policy, util, max);
@@ -555,16 +721,37 @@
 {
 	struct sugov_cpu *sg_cpu = container_of(hook, struct sugov_cpu, update_util);
 	struct sugov_policy *sg_policy = sg_cpu->sg_policy;
+	unsigned long hs_util;
 	unsigned int next_f;
 
+	if (!sg_policy->tunables->pl && flags & SCHED_CPUFREQ_PL)
+		return;
+
+	sg_cpu->util = sugov_get_util(sg_cpu);
+	sg_cpu->flags = flags;
 	raw_spin_lock(&sg_policy->update_lock);
 
+	if (sg_policy->max != sg_cpu->max) {
+		sg_policy->max = sg_cpu->max;
+		hs_util = freq_to_util(sg_policy,
+					sg_policy->tunables->hispeed_freq);
+		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
+		sg_policy->hispeed_util = hs_util;
+	}
+
 	sugov_iowait_boost(sg_cpu, time, flags);
 	sg_cpu->last_update = time;
 
+	sugov_calc_avg_cap(sg_policy, sg_cpu->walt_load.ws,
+			   sg_policy->policy->cur);
 	ignore_dl_rate_limit(sg_cpu, sg_policy);
 
-	if (sugov_should_update_freq(sg_policy, time)) {
+	trace_sugov_util_update(sg_cpu->cpu, sg_cpu->util, sg_policy->avg_cap,
+				sg_cpu->max, sg_cpu->walt_load.nl,
+				sg_cpu->walt_load.pl, flags);
+
+	if (sugov_should_update_freq(sg_policy, time) &&
+	    !(flags & SCHED_CPUFREQ_CONTINUE)) {
 		next_f = sugov_next_freq_shared(sg_cpu, time);
 
 		if (sg_policy->policy->fast_switch_enabled)
@@ -594,7 +781,10 @@
 	 */
 	raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
 	freq = sg_policy->next_freq;
-	sg_policy->work_in_progress = false;
+	if (use_pelt())
+		sg_policy->work_in_progress = false;
+	sugov_track_cycles(sg_policy, sg_policy->policy->cur,
+			   ktime_get_ns());
 	raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 
 	mutex_lock(&sg_policy->work_lock);
@@ -635,14 +825,14 @@
 {
 	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
 
-	return sprintf(buf, "%u\n", tunables->up_rate_limit_us);
+	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->up_rate_limit_us);
 }
 
 static ssize_t down_rate_limit_us_show(struct gov_attr_set *attr_set, char *buf)
 {
 	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
 
-	return sprintf(buf, "%u\n", tunables->down_rate_limit_us);
+	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->down_rate_limit_us);
 }
 
 static ssize_t up_rate_limit_us_store(struct gov_attr_set *attr_set,
@@ -688,9 +878,86 @@
 static struct governor_attr up_rate_limit_us = __ATTR_RW(up_rate_limit_us);
 static struct governor_attr down_rate_limit_us = __ATTR_RW(down_rate_limit_us);
 
+static ssize_t hispeed_load_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_load);
+}
+
+static ssize_t hispeed_load_store(struct gov_attr_set *attr_set,
+				  const char *buf, size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	if (kstrtouint(buf, 10, &tunables->hispeed_load))
+		return -EINVAL;
+
+	tunables->hispeed_load = min(100U, tunables->hispeed_load);
+
+	return count;
+}
+
+static ssize_t hispeed_freq_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->hispeed_freq);
+}
+
+static ssize_t hispeed_freq_store(struct gov_attr_set *attr_set,
+					const char *buf, size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+	unsigned int val;
+	struct sugov_policy *sg_policy;
+	unsigned long hs_util;
+	unsigned long flags;
+
+	if (kstrtouint(buf, 10, &val))
+		return -EINVAL;
+
+	tunables->hispeed_freq = val;
+	list_for_each_entry(sg_policy, &attr_set->policy_list, tunables_hook) {
+		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+		hs_util = freq_to_util(sg_policy,
+					sg_policy->tunables->hispeed_freq);
+		hs_util = mult_frac(hs_util, TARGET_LOAD, 100);
+		sg_policy->hispeed_util = hs_util;
+		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
+	}
+
+	return count;
+}
+
+static ssize_t pl_show(struct gov_attr_set *attr_set, char *buf)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", tunables->pl);
+}
+
+static ssize_t pl_store(struct gov_attr_set *attr_set, const char *buf,
+				   size_t count)
+{
+	struct sugov_tunables *tunables = to_sugov_tunables(attr_set);
+
+	if (kstrtobool(buf, &tunables->pl))
+		return -EINVAL;
+
+	return count;
+}
+
+static struct governor_attr hispeed_load = __ATTR_RW(hispeed_load);
+static struct governor_attr hispeed_freq = __ATTR_RW(hispeed_freq);
+static struct governor_attr pl = __ATTR_RW(pl);
+
 static struct attribute *sugov_attributes[] = {
 	&up_rate_limit_us.attr,
 	&down_rate_limit_us.attr,
+	&hispeed_load.attr,
+	&hispeed_freq.attr,
+	&pl.attr,
 	NULL
 };
 
@@ -796,6 +1063,31 @@
 	return tunables;
 }
 
+static void sugov_tunables_save(struct cpufreq_policy *policy,
+		struct sugov_tunables *tunables)
+{
+	int cpu;
+	struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);
+
+	if (!have_governor_per_policy())
+		return;
+
+	if (!cached) {
+		cached = kzalloc(sizeof(*tunables), GFP_KERNEL);
+		if (!cached)
+			return;
+
+		for_each_cpu(cpu, policy->related_cpus)
+			per_cpu(cached_tunables, cpu) = cached;
+	}
+
+	cached->pl = tunables->pl;
+	cached->hispeed_load = tunables->hispeed_load;
+	cached->hispeed_freq = tunables->hispeed_freq;
+	cached->up_rate_limit_us = tunables->up_rate_limit_us;
+	cached->down_rate_limit_us = tunables->down_rate_limit_us;
+}
+
 static void sugov_tunables_free(struct sugov_tunables *tunables)
 {
 	if (!have_governor_per_policy())
@@ -804,6 +1096,22 @@
 	kfree(tunables);
 }
 
+static void sugov_tunables_restore(struct cpufreq_policy *policy)
+{
+	struct sugov_policy *sg_policy = policy->governor_data;
+	struct sugov_tunables *tunables = sg_policy->tunables;
+	struct sugov_tunables *cached = per_cpu(cached_tunables, policy->cpu);
+
+	if (!cached)
+		return;
+
+	tunables->pl = cached->pl;
+	tunables->hispeed_load = cached->hispeed_load;
+	tunables->hispeed_freq = cached->hispeed_freq;
+	tunables->up_rate_limit_us = cached->up_rate_limit_us;
+	tunables->down_rate_limit_us = cached->down_rate_limit_us;
+}
+
 static int sugov_init(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy;
@@ -848,9 +1156,14 @@
 
 	tunables->up_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
 	tunables->down_rate_limit_us = cpufreq_policy_transition_delay_us(policy);
+	tunables->hispeed_load = DEFAULT_HISPEED_LOAD;
+	tunables->hispeed_freq = 0;
 
 	policy->governor_data = sg_policy;
 	sg_policy->tunables = tunables;
+	stale_ns = sched_ravg_window + (sched_ravg_window >> 3);
+
+	sugov_tunables_restore(policy);
 
 	ret = kobject_init_and_add(&tunables->attr_set.kobj, &sugov_tunables_ktype,
 				   get_governor_parent_kobj(policy), "%s",
@@ -890,8 +1203,10 @@
 
 	count = gov_attr_set_put(&tunables->attr_set, &sg_policy->tunables_hook);
 	policy->governor_data = NULL;
-	if (!count)
+	if (!count) {
+		sugov_tunables_save(policy, tunables);
 		sugov_tunables_free(tunables);
+	}
 
 	mutex_unlock(&global_tunables_lock);
 
@@ -955,9 +1270,14 @@
 static void sugov_limits(struct cpufreq_policy *policy)
 {
 	struct sugov_policy *sg_policy = policy->governor_data;
+	unsigned long flags;
 
 	if (!policy->fast_switch_enabled) {
 		mutex_lock(&sg_policy->work_lock);
+		raw_spin_lock_irqsave(&sg_policy->update_lock, flags);
+		sugov_track_cycles(sg_policy, sg_policy->policy->cur,
+				   ktime_get_ns());
+		raw_spin_unlock_irqrestore(&sg_policy->update_lock, flags);
 		cpufreq_policy_apply_limits(policy);
 		mutex_unlock(&sg_policy->work_lock);
 	}
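The cycle-tracking math above (sugov_track_cycles(), sugov_calc_avg_cap(), freq_to_util()) is easier to sanity-check with concrete numbers. The standalone sketch below reproduces the arithmetic in plain C with assumed values (a 20 ms WALT window, a 1,804,800 kHz policy ceiling, capacity 1024); it is an illustration of the formulas, not kernel code.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define KHZ		1000ULL

int main(void)
{
	uint64_t window_ns = 20ULL * 1000 * 1000;	/* assumed 20 ms WALT window */
	uint64_t max_freq  = 1804800;			/* assumed cpuinfo.max_freq, kHz */
	uint64_t max_cap   = 1024;			/* assumed arch CPU capacity */

	/*
	 * sugov_track_cycles(): delta_ns * freq_khz / (NSEC_PER_SEC / KHZ).
	 * Assume the CPU spent 15 ms at 1,209,600 kHz and 5 ms at max_freq.
	 */
	uint64_t cycles = (15ULL * 1000 * 1000 * 1209600) / (NSEC_PER_SEC / KHZ)
			+ (5ULL * 1000 * 1000 * max_freq) / (NSEC_PER_SEC / KHZ);

	/* sugov_calc_avg_cap(): average frequency over the whole window. */
	uint64_t avg_freq = cycles / (window_ns / (NSEC_PER_SEC / KHZ));

	/* freq_to_util(): scale the average frequency into capacity units. */
	uint64_t avg_cap = max_cap * avg_freq / max_freq;

	/*
	 * For reference, sugov_init() above sets stale_ns = window + window/8,
	 * i.e. 22.5 ms for this assumed 20 ms window.
	 */
	printf("cycles=%llu avg_freq=%llu kHz avg_cap=%llu\n",
	       (unsigned long long)cycles,
	       (unsigned long long)avg_freq,
	       (unsigned long long)avg_cap);
	return 0;
}

With these assumed inputs the program prints avg_freq=1358400 kHz and avg_cap=770, which is the value sugov_walt_adjust() would compare against hispeed_load.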
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cc91bda..fce85bb 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6005,9 +6005,10 @@
 }
 
 unsigned long
-boosted_cpu_util(int cpu, unsigned long other_util)
+boosted_cpu_util(int cpu, unsigned long other_util,
+		 struct sched_walt_cpu_load *walt_load)
 {
-	unsigned long util = cpu_util_cfs(cpu_rq(cpu)) + other_util;
+	unsigned long util = cpu_util_freq(cpu, walt_load) + other_util;
 	long margin = schedtune_cpu_margin(util, cpu);
 
 	trace_sched_boost_cpu(cpu, util, margin);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 06acc5b..5fc910c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2073,8 +2073,10 @@
 	return (delta >= capacity) ? capacity : delta;
 }
 
-
 #ifdef CONFIG_SCHED_WALT
+extern unsigned long boosted_cpu_util(int cpu, unsigned long other_util,
+				      struct sched_walt_cpu_load *walt_load);
+
 u64 freq_policy_load(struct rq *rq);
 
 extern u64 walt_load_reported_window;
@@ -2557,9 +2559,9 @@
 #endif
 
 	data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
-						  cpu_of(rq)));
+					cpu_of(rq)));
 	if (data)
-		data->func(data, rq_clock(rq), flags);
+		data->func(data, sched_ktime_clock(), flags);
 }
 #else
 static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
diff --git a/kernel/sched/tune.h b/kernel/sched/tune.h
index 821f026..8dd5e10 100644
--- a/kernel/sched/tune.h
+++ b/kernel/sched/tune.h
@@ -20,8 +20,6 @@
 void schedtune_enqueue_task(struct task_struct *p, int cpu);
 void schedtune_dequeue_task(struct task_struct *p, int cpu);
 
-unsigned long boosted_cpu_util(int cpu, unsigned long other_util);
-
 #else /* CONFIG_SCHED_TUNE */
 
 #define schedtune_cpu_boost(cpu)  0
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 85eb868..6a492f7 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -26,6 +26,7 @@
 #include <linux/module.h>
 #include <linux/irq_work.h>
 #include <linux/posix-timers.h>
+#include <linux/timer.h>
 #include <linux/context_tracking.h>
 #include <linux/mm.h>
 
@@ -920,6 +921,11 @@
 	ktime_t expires;
 	int cpu = smp_processor_id();
 
+#ifdef CONFIG_SMP
+	if (check_pending_deferrable_timers(cpu))
+		raise_softirq_irqoff(TIMER_SOFTIRQ);
+#endif
+
 	/*
 	 * If tick_nohz_get_sleep_length() ran tick_nohz_next_event(), the
 	 * tick timer expiration time is known already.
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 31bb91a..e0d0fd71 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -208,6 +208,7 @@
 
 static DEFINE_PER_CPU(struct timer_base, timer_bases[NR_BASES]);
 struct timer_base timer_base_deferrable;
+static atomic_t deferrable_pending;
 
 #ifdef CONFIG_NO_HZ_COMMON
 
@@ -1526,6 +1527,31 @@
 	return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 }
 
+
+#ifdef CONFIG_SMP
+/*
+ * check_pending_deferrable_timers - Check for unbound deferrable timer expiry
+ * @cpu - Current CPU
+ *
+ * Checks whether any global (CPU-unbound) deferrable timers have expired.
+ * CPU-bound deferrable timers are not checked here.
+ *
+ * Returns true when an unbound deferrable timer has expired and this CPU
+ * should raise the timer softirq to run it.
+ */
+bool check_pending_deferrable_timers(int cpu)
+{
+	if (cpu == tick_do_timer_cpu ||
+		tick_do_timer_cpu == TICK_DO_TIMER_NONE) {
+		if (time_after_eq(jiffies, timer_base_deferrable.clk)
+			&& !atomic_cmpxchg(&deferrable_pending, 0, 1)) {
+			return true;
+		}
+	}
+	return false;
+}
+#endif
+
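The atomic_cmpxchg() handshake between check_pending_deferrable_timers() above and the run_timer_softirq() change later in this file's diff is the core of the change: the idle path claims pending unbound deferrable work with a 0 -> 1 swap and raises TIMER_SOFTIRQ, and the softirq side takes the claim back with a 1 -> 0 swap before running timer_base_deferrable. A distilled user-space model of that flag protocol is sketched below; C11 atomics stand in for the kernel's atomic_t, and it is illustrative only.

/* Distilled model of the deferrable_pending handoff; illustration only. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int deferrable_pending;

/* Models check_pending_deferrable_timers(): claim the work if unclaimed. */
static bool claim_deferrable_work(void)
{
	int expected = 0;

	return atomic_compare_exchange_strong(&deferrable_pending,
					      &expected, 1);
}

/* Models the run_timer_softirq() hunk: take the claim before running. */
static bool take_deferrable_work(void)
{
	int expected = 1;

	return atomic_compare_exchange_strong(&deferrable_pending,
					      &expected, 0);
}

int main(void)
{
	printf("first claim:  %d\n", claim_deferrable_work()); /* 1: claimed */
	printf("second claim: %d\n", claim_deferrable_work()); /* 0: already pending */
	printf("take:         %d\n", take_deferrable_work());  /* 1: run the base */
	printf("take again:   %d\n", take_deferrable_work());  /* 0: nothing pending */
	return 0;
}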
 /**
  * get_next_timer_interrupt - return the time (clock mono) of the next timer
  * @basej:	base time jiffies
@@ -1712,9 +1738,13 @@
 
 	__run_timers(base);
 	if (IS_ENABLED(CONFIG_NO_HZ_COMMON)) {
-		__run_timers(&timer_base_deferrable);
 		__run_timers(this_cpu_ptr(&timer_bases[BASE_DEF]));
 	}
+
+	if ((atomic_cmpxchg(&deferrable_pending, 1, 0) &&
+		tick_do_timer_cpu == TICK_DO_TIMER_NONE) ||
+		tick_do_timer_cpu == smp_processor_id())
+		__run_timers(&timer_base_deferrable);
 }
 
 /*